From 63a045f4ff09b1d5097c84ecac325998795de734 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Wed, 23 Sep 2015 03:43:48 -0400 Subject: [PATCH 0001/1238] #2087: add consul agent_service and catalog_entry resources; create resource works; TODO support more fields and add tests --- builtin/providers/consul/config.go | 4 + builtin/providers/consul/get_dc.go | 17 ++ .../consul/resource_consul_agent_service.go | 79 +++++++ .../consul/resource_consul_catalog_entry.go | 199 ++++++++++++++++++ .../providers/consul/resource_consul_keys.go | 10 - builtin/providers/consul/resource_provider.go | 7 + 6 files changed, 306 insertions(+), 10 deletions(-) create mode 100644 builtin/providers/consul/get_dc.go create mode 100644 builtin/providers/consul/resource_consul_agent_service.go create mode 100644 builtin/providers/consul/resource_consul_catalog_entry.go diff --git a/builtin/providers/consul/config.go b/builtin/providers/consul/config.go index 7983018c6..8465bd9f6 100644 --- a/builtin/providers/consul/config.go +++ b/builtin/providers/consul/config.go @@ -9,6 +9,7 @@ import ( type Config struct { Datacenter string `mapstructure:"datacenter"` Address string `mapstructure:"address"` + Token string `mapstructure:"token"` Scheme string `mapstructure:"scheme"` } @@ -25,6 +26,9 @@ func (c *Config) Client() (*consulapi.Client, error) { if c.Scheme != "" { config.Scheme = c.Scheme } + if c.Token != "" { + config.Token = c.Token + } client, err := consulapi.NewClient(config) log.Printf("[INFO] Consul Client configured with address: '%s', scheme: '%s', datacenter: '%s'", diff --git a/builtin/providers/consul/get_dc.go b/builtin/providers/consul/get_dc.go new file mode 100644 index 000000000..0909276aa --- /dev/null +++ b/builtin/providers/consul/get_dc.go @@ -0,0 +1,17 @@ +package consul + +import ( + "fmt" + + consulapi "github.com/hashicorp/consul/api" +) + +// getDC is used to get the datacenter of the local agent +func getDC(client *consulapi.Client) (string, error) { + info, err := client.Agent().Self() + if err != nil { + return "", fmt.Errorf("Failed to get datacenter from Consul agent: %v", err) + } + dc := info["Config"]["Datacenter"].(string) + return dc, nil +} diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go new file mode 100644 index 000000000..6584770c0 --- /dev/null +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -0,0 +1,79 @@ +package consul + +import ( + "fmt" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceConsulAgentService() *schema.Resource { + return &schema.Resource{ + Create: resourceConsulAgentServiceCreate, + Update: resourceConsulAgentServiceCreate, + Read: resourceConsulAgentServiceRead, + Delete: resourceConsulAgentServiceDelete, + + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceConsulAgentServiceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + agent := client.Agent() + + name := d.Get("name").(string) + + registration := consulapi.AgentServiceRegistration{Name: name} + + if err := agent.ServiceRegister(®istration); err != nil { + return fmt.Errorf("Failed to register service '%s' with Consul agent: %v", name, err) + } + + // Update the resource + 
d.SetId(fmt.Sprintf("consul-agent-service-%s", name)) + return nil +} + +func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + agent := client.Agent() + + name := d.Get("name").(string) + + if services, err := agent.Services(); err != nil { + return fmt.Errorf("Failed to get services from Consul agent: %v", err) + } else { + if _, ok := services[name]; !ok { + return fmt.Errorf("Failed to get service '%s' from Consul agent: %v", name, err) + } + } + + return nil +} + +func resourceConsulAgentServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Agent() + + name := d.Get("name").(string) + + if err := catalog.ServiceDeregister(name); err != nil { + return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", name, err) + } + + // Clear the ID + d.SetId("") + return nil +} diff --git a/builtin/providers/consul/resource_consul_catalog_entry.go b/builtin/providers/consul/resource_consul_catalog_entry.go new file mode 100644 index 000000000..86ab6874c --- /dev/null +++ b/builtin/providers/consul/resource_consul_catalog_entry.go @@ -0,0 +1,199 @@ +package consul + +import ( + "bytes" + "fmt" + "log" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceConsulCatalogEntry() *schema.Resource { + return &schema.Resource{ + Create: resourceConsulCatalogEntryCreate, + Update: resourceConsulCatalogEntryCreate, + Read: resourceConsulCatalogEntryRead, + Delete: resourceConsulCatalogEntryDelete, + + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "node": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "service": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource { + Schema: map[string]*schema.Schema{ + "service": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceConsulCatalogEntryServicesHash, + }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceConsulCatalogEntryServicesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["service"].(string))) + return hashcode.String(buf.String()) +} + +func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + // Resolve the datacenter first, all the other keys are dependent on this + var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + log.Printf("[DEBUG] Consul datacenter: %s", dc) + } else { + log.Printf("[DEBUG] Resolving Consul datacenter...") + var err error + dc, err = getDC(client) + if err != nil { + return err + } + } + + var token string + if v, ok := d.GetOk("token"); ok { + token = v.(string) + } + + // Setup the operations using the datacenter + wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} + + address := d.Get("address").(string) + node := d.Get("node").(string) + + if rawServiceDefinition, ok := d.GetOk("service"); ok { + rawServiceList := rawServiceDefinition.(*schema.Set).List() + for _, rawService := range rawServiceList { + service, ok := 
rawService.(map[string]interface{}) + + if !ok { + return fmt.Errorf("Failed to unroll: %#v", rawService) + } + + serviceName := service["service"].(string) + + registration := consulapi.CatalogRegistration{ + Node: node, Address: address, Datacenter: dc, + Service: &consulapi.AgentService{Service: serviceName}, + } + + if _, err := catalog.Register(®istration, &wOpts); err != nil { + return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' with service %s in %s: %v", + node, address, serviceName, dc, err) + } + } + } else { + registration := consulapi.CatalogRegistration{ + Node: node, Address: address, Datacenter: dc, + } + + if _, err := catalog.Register(®istration, &wOpts); err != nil { + return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", + node, address, dc, err) + } + } + + // Update the resource + d.SetId(fmt.Sprintf("consul-catalog-node-%s-%s", node, address)) + d.Set("datacenter", dc) + return nil +} + +func resourceConsulCatalogEntryRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + // Get the DC, error if not available. + var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + log.Printf("[DEBUG] Consul datacenter: %s", dc) + } else { + return fmt.Errorf("Missing datacenter configuration") + } + var token string + if v, ok := d.GetOk("token"); ok { + token = v.(string) + } + + node := d.Get("node").(string) + + // Setup the operations using the datacenter + qOpts := consulapi.QueryOptions{Datacenter: dc, Token: token} + + if _, _, err := catalog.Node(node, &qOpts); err != nil { + return fmt.Errorf("Failed to get node '%s' from Consul catalog: %v", node, err) + } + + return nil +} + +func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + // Get the DC, error if not available. 
+ var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + log.Printf("[DEBUG] Consul datacenter: %s", dc) + } else { + return fmt.Errorf("Missing datacenter configuration") + } + var token string + if v, ok := d.GetOk("token"); ok { + token = v.(string) + } + + // Setup the operations using the datacenter + wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} + + address := d.Get("address").(string) + node := d.Get("node").(string) + + deregistration := consulapi.CatalogDeregistration{ + Node: node, Address: address, Datacenter: dc, + } + + if _, err := catalog.Deregister(&deregistration, &wOpts); err != nil { + return fmt.Errorf("Failed to deregister Consul catalog entry with node '%s' at address '%s' in %s: %v", + node, address, dc, err) + } + + // Clear the ID + d.SetId("") + return nil +} + diff --git a/builtin/providers/consul/resource_consul_keys.go b/builtin/providers/consul/resource_consul_keys.go index 58000d7f7..7ad6d8d69 100644 --- a/builtin/providers/consul/resource_consul_keys.go +++ b/builtin/providers/consul/resource_consul_keys.go @@ -283,13 +283,3 @@ func attributeValue(sub map[string]interface{}, key string, pair *consulapi.KVPa // No value return "" } - -// getDC is used to get the datacenter of the local agent -func getDC(client *consulapi.Client) (string, error) { - info, err := client.Agent().Self() - if err != nil { - return "", fmt.Errorf("Failed to get datacenter from Consul agent: %v", err) - } - dc := info["Config"]["Datacenter"].(string) - return dc, nil -} diff --git a/builtin/providers/consul/resource_provider.go b/builtin/providers/consul/resource_provider.go index ec5cd43a5..b109b9fe8 100644 --- a/builtin/providers/consul/resource_provider.go +++ b/builtin/providers/consul/resource_provider.go @@ -26,9 +26,16 @@ func Provider() terraform.ResourceProvider { Type: schema.TypeString, Optional: true, }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, }, ResourcesMap: map[string]*schema.Resource{ + "consul_agent_service": resourceConsulAgentService(), + "consul_catalog_entry": resourceConsulCatalogEntry(), "consul_keys": resourceConsulKeys(), }, From 65ebbda7767ab33eee9d0caa156a6f5b4006615a Mon Sep 17 00:00:00 2001 From: Max Englander Date: Thu, 24 Sep 2015 01:30:06 -0400 Subject: [PATCH 0002/1238] #2087 agent_service resource supports address, port, and tag keys, can be read (refreshed) and deleted --- .../consul/resource_consul_agent_service.go | 66 +++++++++++++++++-- 1 file changed, 59 insertions(+), 7 deletions(-) diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go index 6584770c0..bbd83aab8 100644 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -16,15 +16,32 @@ func resourceConsulAgentService() *schema.Resource { Schema: map[string]*schema.Schema{ "address": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, Optional: true, Computed: true, }, "name": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Required: true, }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, } } @@ -34,15 +51,46 @@ func resourceConsulAgentServiceCreate(d *schema.ResourceData, meta 
interface{}) agent := client.Agent() name := d.Get("name").(string) - registration := consulapi.AgentServiceRegistration{Name: name} + if address, ok := d.GetOk("address"); ok { + registration.Address = address.(string) + } + + if id, ok := d.GetOk("id"); ok { + registration.ID = id.(string) + } + + if port, ok := d.GetOk("port"); ok { + registration.Port = port.(int) + } + + if v, ok := d.GetOk("tags"); ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + registration.Tags = s + } + if err := agent.ServiceRegister(®istration); err != nil { return fmt.Errorf("Failed to register service '%s' with Consul agent: %v", name, err) } // Update the resource - d.SetId(fmt.Sprintf("consul-agent-service-%s", name)) + if serviceMap, err := agent.Services(); err != nil { + return fmt.Errorf("Failed to read services from Consul agent: %v", err) + } else if service, ok := serviceMap[name]; !ok { + return fmt.Errorf("Failed to read service '%s' from Consul agent: %v", name, err) + } else { + d.Set("address", service.Address) + d.Set("name", service.Service) + d.Set("port", service.Port) + d.Set("tags", service.Tags) + d.SetId(service.ID) + } + return nil } @@ -54,10 +102,14 @@ func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) er if services, err := agent.Services(); err != nil { return fmt.Errorf("Failed to get services from Consul agent: %v", err) + } else if service, ok := services[name]; !ok { + return fmt.Errorf("Failed to get service '%s' from Consul agent: %v", name, err) } else { - if _, ok := services[name]; !ok { - return fmt.Errorf("Failed to get service '%s' from Consul agent: %v", name, err) - } + d.Set("address", service.Address) + d.Set("name", service.Service) + d.Set("port", service.Port) + d.Set("tags", service.Tags) + d.SetId(service.ID) } return nil From 929e3cdbe25ea453d00451f5f7d68de898573640 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Mon, 28 Sep 2015 01:12:20 -0400 Subject: [PATCH 0003/1238] #2087: add consul_agent_service acceptance test --- .../consul/resource_consul_agent_service.go | 2 +- .../resource_consul_agent_service_test.go | 85 +++++++++++++++++++ 2 files changed, 86 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/consul/resource_consul_agent_service_test.go diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go index bbd83aab8..c4cf0f272 100644 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -103,7 +103,7 @@ func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) er if services, err := agent.Services(); err != nil { return fmt.Errorf("Failed to get services from Consul agent: %v", err) } else if service, ok := services[name]; !ok { - return fmt.Errorf("Failed to get service '%s' from Consul agent: %v", name, err) + return fmt.Errorf("Failed to get service '%s' from Consul agent", name) } else { d.Set("address", service.Address) d.Set("name", service.Service) diff --git a/builtin/providers/consul/resource_consul_agent_service_test.go b/builtin/providers/consul/resource_consul_agent_service_test.go new file mode 100644 index 000000000..5d5b022d8 --- /dev/null +++ b/builtin/providers/consul/resource_consul_agent_service_test.go @@ -0,0 +1,85 @@ +package consul + +import ( + "fmt" + "testing" + + consulapi "github.com/hashicorp/consul/api" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccConsulAgentService_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() {}, + Providers: testAccProviders, + CheckDestroy: testAccCheckConsulAgentServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccConsulAgentServiceConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckConsulAgentServiceExists(), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "address", "www.google.com"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "id", "google"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "name", "google"), + ), + }, + }, + }) +} + +func testAccCheckConsulAgentServiceDestroy(s *terraform.State) error { + agent := testAccProvider.Meta().(*consulapi.Client).Agent() + services, err := agent.Services() + if err != nil { + return fmt.Errorf("Could not retrieve services: %#v", err) + } + _, ok := services["google"] + if ok { + return fmt.Errorf("Service still exists: %#v", "google") + } + return nil +} + +func testAccCheckConsulAgentServiceExists() resource.TestCheckFunc { + return func(s *terraform.State) error { + agent := testAccProvider.Meta().(*consulapi.Client).Agent() + services, err := agent.Services() + if err != nil { + return err + } + _, ok := services["google"] + if !ok { + return fmt.Errorf("Service does not exist: %#v", "google") + } + return nil + } +} + +func testAccCheckConsulAgentServiceValue(n, attr, val string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rn, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found") + } + out, ok := rn.Primary.Attributes[attr] + if !ok { + return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) + } + if val != "" && out != val { + return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) + } + if val == "" && out == "" { + return fmt.Errorf("Attribute '%s' value '%s'", attr, out) + } + return nil + } +} + +const testAccConsulAgentServiceConfig = ` +resource "consul_agent_service" "app" { + name = "google" + address = "www.google.com" + port = 80 +} +` From 7b9ec5925801239236d2d1c8ceccd79633b06690 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Thu, 15 Oct 2015 04:06:52 -0400 Subject: [PATCH 0004/1238] #2087: consul_catalog_entry can set a service --- builtin/providers/consul/config.go | 2 +- .../consul/resource_consul_catalog_entry.go | 104 +++++++++++------- 2 files changed, 64 insertions(+), 42 deletions(-) diff --git a/builtin/providers/consul/config.go b/builtin/providers/consul/config.go index 8465bd9f6..cb6d7af79 100644 --- a/builtin/providers/consul/config.go +++ b/builtin/providers/consul/config.go @@ -27,7 +27,7 @@ func (c *Config) Client() (*consulapi.Client, error) { config.Scheme = c.Scheme } if c.Token != "" { - config.Token = c.Token + config.Token = c.Token } client, err := consulapi.NewClient(config) diff --git a/builtin/providers/consul/resource_consul_catalog_entry.go b/builtin/providers/consul/resource_consul_catalog_entry.go index 86ab6874c..09d9e62ff 100644 --- a/builtin/providers/consul/resource_consul_catalog_entry.go +++ b/builtin/providers/consul/resource_consul_catalog_entry.go @@ -3,7 +3,6 @@ package consul import ( "bytes" "fmt" - "log" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/terraform/helper/hashcode" @@ -19,38 +18,60 @@ func resourceConsulCatalogEntry() *schema.Resource { 
Schema: map[string]*schema.Schema{ "address": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Required: true, }, "datacenter": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, Computed: true, ForceNew: true, }, "node": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Required: true, }, "service": &schema.Schema{ - Type: schema.TypeSet, + Type: schema.TypeSet, Optional: true, - Elem: &schema.Resource { + Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "service": &schema.Schema{ - Type: schema.TypeString, + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, Required: true, }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, }, }, Set: resourceConsulCatalogEntryServicesHash, }, "token": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeString, Optional: true, }, }, @@ -60,7 +81,7 @@ func resourceConsulCatalogEntry() *schema.Resource { func resourceConsulCatalogEntryServicesHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) - buf.WriteString(fmt.Sprintf("%s-", m["service"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["id"].(string))) return hashcode.String(buf.String()) } @@ -68,16 +89,12 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) client := meta.(*consulapi.Client) catalog := client.Catalog() - // Resolve the datacenter first, all the other keys are dependent on this var dc string if v, ok := d.GetOk("datacenter"); ok { dc = v.(string) - log.Printf("[DEBUG] Consul datacenter: %s", dc) } else { - log.Printf("[DEBUG] Resolving Consul datacenter...") var err error - dc, err = getDC(client) - if err != nil { + if dc, err = getDC(client); err != nil { return err } } @@ -93,30 +110,39 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) address := d.Get("address").(string) node := d.Get("node").(string) - if rawServiceDefinition, ok := d.GetOk("service"); ok { - rawServiceList := rawServiceDefinition.(*schema.Set).List() - for _, rawService := range rawServiceList { - service, ok := rawService.(map[string]interface{}) + if services, ok := d.GetOk("service"); ok { + for _, rawService := range services.(*schema.Set).List() { + serviceData := rawService.(map[string]interface{}) - if !ok { - return fmt.Errorf("Failed to unroll: %#v", rawService) + rawTags := serviceData["tags"].([]interface{}) + tags := make([]string, len(rawTags)) + for i, v := range rawTags { + tags[i] = v.(string) } - serviceName := service["service"].(string) - registration := consulapi.CatalogRegistration{ - Node: node, Address: address, Datacenter: dc, - Service: &consulapi.AgentService{Service: serviceName}, + Address: address, + Datacenter: dc, + Node: node, + Service: &consulapi.AgentService{ + Address: serviceData["address"].(string), + ID: serviceData["id"].(string), + Service: serviceData["name"].(string), + Port: serviceData["port"].(int), + Tags: tags, + }, } if _, err := catalog.Register(®istration, &wOpts); err != nil { - return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' with service %s in %s: %v", - node, address, serviceName, dc, err) + return 
fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", + node, address, dc, err) } } } else { registration := consulapi.CatalogRegistration{ - Node: node, Address: address, Datacenter: dc, + Address: address, + Datacenter: dc, + Node: node, } if _, err := catalog.Register(®istration, &wOpts); err != nil { @@ -126,8 +152,14 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) } // Update the resource + qOpts := consulapi.QueryOptions{Datacenter: dc} + if _, _, err := catalog.Node(node, &qOpts); err != nil { + return fmt.Errorf("Failed to read Consul catalog entry for node '%s' at address '%s' in %s: %v", + node, address, dc, err) + } else { + d.Set("datacenter", dc) + } d.SetId(fmt.Sprintf("consul-catalog-node-%s-%s", node, address)) - d.Set("datacenter", dc) return nil } @@ -139,19 +171,12 @@ func resourceConsulCatalogEntryRead(d *schema.ResourceData, meta interface{}) er var dc string if v, ok := d.GetOk("datacenter"); ok { dc = v.(string) - log.Printf("[DEBUG] Consul datacenter: %s", dc) - } else { - return fmt.Errorf("Missing datacenter configuration") - } - var token string - if v, ok := d.GetOk("token"); ok { - token = v.(string) } node := d.Get("node").(string) // Setup the operations using the datacenter - qOpts := consulapi.QueryOptions{Datacenter: dc, Token: token} + qOpts := consulapi.QueryOptions{Datacenter: dc} if _, _, err := catalog.Node(node, &qOpts); err != nil { return fmt.Errorf("Failed to get node '%s' from Consul catalog: %v", node, err) @@ -168,10 +193,8 @@ func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) var dc string if v, ok := d.GetOk("datacenter"); ok { dc = v.(string) - log.Printf("[DEBUG] Consul datacenter: %s", dc) - } else { - return fmt.Errorf("Missing datacenter configuration") } + var token string if v, ok := d.GetOk("token"); ok { token = v.(string) @@ -196,4 +219,3 @@ func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) d.SetId("") return nil } - From 8d6b71e2ae4626ead4e5e7d3e9bf72262c89b583 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Thu, 29 Oct 2015 05:04:25 -0400 Subject: [PATCH 0005/1238] use ForceNew on just about all catalog entry attributes; struggle in the hell of 'diffs mismatch' --- .../consul/resource_consul_catalog_entry.go | 72 +++++++++++++++---- 1 file changed, 58 insertions(+), 14 deletions(-) diff --git a/builtin/providers/consul/resource_consul_catalog_entry.go b/builtin/providers/consul/resource_consul_catalog_entry.go index 09d9e62ff..73ed90307 100644 --- a/builtin/providers/consul/resource_consul_catalog_entry.go +++ b/builtin/providers/consul/resource_consul_catalog_entry.go @@ -3,6 +3,8 @@ package consul import ( "bytes" "fmt" + "sort" + "strings" consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/terraform/helper/hashcode" @@ -20,6 +22,7 @@ func resourceConsulCatalogEntry() *schema.Resource { "address": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "datacenter": &schema.Schema{ @@ -32,37 +35,44 @@ func resourceConsulCatalogEntry() *schema.Resource { "node": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "service": &schema.Schema{ Type: schema.TypeSet, Optional: true, + ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + ForceNew: true, }, "id": &schema.Schema{ Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: 
true, }, "name": &schema.Schema{ Type: schema.TypeString, Required: true, + ForceNew: true, }, "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, + ForceNew: true, }, "tags": &schema.Schema{ Type: schema.TypeList, Optional: true, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, }, @@ -82,6 +92,21 @@ func resourceConsulCatalogEntryServicesHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) buf.WriteString(fmt.Sprintf("%s-", m["id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["address"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + if v, ok := m["tags"]; ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + sort.Strings(s) + + for _, v := range s { + buf.WriteString(fmt.Sprintf("%s-", v)) + } + } return hashcode.String(buf.String()) } @@ -110,42 +135,50 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) address := d.Get("address").(string) node := d.Get("node").(string) - if services, ok := d.GetOk("service"); ok { - for _, rawService := range services.(*schema.Set).List() { + var serviceIDs []string + if service, ok := d.GetOk("service"); ok { + serviceList := service.(*schema.Set).List() + serviceIDs = make([]string, len(serviceList)) + for i, rawService := range serviceList { serviceData := rawService.(map[string]interface{}) - rawTags := serviceData["tags"].([]interface{}) - tags := make([]string, len(rawTags)) - for i, v := range rawTags { - tags[i] = v.(string) + serviceID := serviceData["id"].(string) + serviceIDs[i] = serviceID + + var tags []string + if v := serviceData["tags"].([]interface{}); len(v) > 0 { + tags = make([]string, len(v)) + for i, raw := range v { + tags[i] = raw.(string) + } } - registration := consulapi.CatalogRegistration{ + registration := &consulapi.CatalogRegistration{ Address: address, Datacenter: dc, Node: node, Service: &consulapi.AgentService{ Address: serviceData["address"].(string), - ID: serviceData["id"].(string), + ID: serviceID, Service: serviceData["name"].(string), Port: serviceData["port"].(int), Tags: tags, }, } - if _, err := catalog.Register(®istration, &wOpts); err != nil { + if _, err := catalog.Register(registration, &wOpts); err != nil { return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", node, address, dc, err) } } } else { - registration := consulapi.CatalogRegistration{ + registration := &consulapi.CatalogRegistration{ Address: address, Datacenter: dc, Node: node, } - if _, err := catalog.Register(®istration, &wOpts); err != nil { + if _, err := catalog.Register(registration, &wOpts); err != nil { return fmt.Errorf("Failed to register Consul catalog entry with node '%s' at address '%s' in %s: %v", node, address, dc, err) } @@ -159,7 +192,12 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) } else { d.Set("datacenter", dc) } - d.SetId(fmt.Sprintf("consul-catalog-node-%s-%s", node, address)) + + sort.Strings(serviceIDs) + serviceIDsJoined := strings.Join(serviceIDs, ",") + + d.SetId(fmt.Sprintf("%s-%s-[%s]", node, address, serviceIDsJoined)) + return nil } @@ -189,10 +227,14 @@ func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) client := meta.(*consulapi.Client) catalog := client.Catalog() - // Get the DC, error if not available. 
var dc string if v, ok := d.GetOk("datacenter"); ok { dc = v.(string) + } else { + var err error + if dc, err = getDC(client); err != nil { + return err + } } var token string @@ -207,7 +249,9 @@ func resourceConsulCatalogEntryDelete(d *schema.ResourceData, meta interface{}) node := d.Get("node").(string) deregistration := consulapi.CatalogDeregistration{ - Node: node, Address: address, Datacenter: dc, + Address: address, + Datacenter: dc, + Node: node, } if _, err := catalog.Deregister(&deregistration, &wOpts); err != nil { From 5f5fd7c6e61ef8195cd363b350be0305a0a2c9ff Mon Sep 17 00:00:00 2001 From: Max Englander Date: Thu, 29 Oct 2015 11:39:27 -0400 Subject: [PATCH 0006/1238] make good use of ForceNew in consul_agent_service --- builtin/providers/consul/resource_consul_agent_service.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go index c4cf0f272..1a15bf6d8 100644 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -19,12 +19,14 @@ func resourceConsulAgentService() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, }, "id": &schema.Schema{ Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, }, "name": &schema.Schema{ @@ -35,12 +37,14 @@ func resourceConsulAgentService() *schema.Resource { "port": &schema.Schema{ Type: schema.TypeInt, Optional: true, + ForceNew: true, }, "tags": &schema.Schema{ Type: schema.TypeList, Optional: true, Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, }, }, } From adda285caecb524e12f6a4c0842591b34f0d5a0a Mon Sep 17 00:00:00 2001 From: Max Englander Date: Mon, 25 Jan 2016 00:15:09 +0000 Subject: [PATCH 0007/1238] finish tests and add docs for consul_{agent_service,catalog_entry} --- .../consul/resource_consul_agent_service.go | 30 +++--- .../resource_consul_agent_service_test.go | 7 +- .../consul/resource_consul_catalog_entry.go | 11 +- .../resource_consul_catalog_entry_test.go | 100 ++++++++++++++++++ builtin/providers/consul/resource_provider.go | 2 +- .../consul/r/agent_service.html.markdown | 47 ++++++++ .../consul/r/catalog_entry.html.markdown | 58 ++++++++++ 7 files changed, 237 insertions(+), 18 deletions(-) create mode 100644 builtin/providers/consul/resource_consul_catalog_entry_test.go create mode 100644 website/source/docs/providers/consul/r/agent_service.html.markdown create mode 100644 website/source/docs/providers/consul/r/catalog_entry.html.markdown diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go index 1a15bf6d8..9ede63bf3 100644 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -24,9 +24,7 @@ func resourceConsulAgentService() *schema.Resource { "id": &schema.Schema{ Type: schema.TypeString, - Optional: true, Computed: true, - ForceNew: true, }, "name": &schema.Schema{ @@ -61,10 +59,6 @@ func resourceConsulAgentServiceCreate(d *schema.ResourceData, meta interface{}) registration.Address = address.(string) } - if id, ok := d.GetOk("id"); ok { - registration.ID = id.(string) - } - if port, ok := d.GetOk("port"); ok { registration.Port = port.(int) } @@ -89,10 +83,15 @@ func resourceConsulAgentServiceCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Failed to read service '%s' from Consul 
agent: %v", name, err) } else { d.Set("address", service.Address) + d.Set("id", service.ID) + d.SetId(service.ID) d.Set("name", service.Service) d.Set("port", service.Port) - d.Set("tags", service.Tags) - d.SetId(service.ID) + tags := make([]string, 0, len(service.Tags)) + for _, tag := range service.Tags { + tags = append(tags, tag) + } + d.Set("tags", tags) } return nil @@ -110,10 +109,15 @@ func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Failed to get service '%s' from Consul agent", name) } else { d.Set("address", service.Address) + d.Set("id", service.ID) + d.SetId(service.ID) d.Set("name", service.Service) d.Set("port", service.Port) - d.Set("tags", service.Tags) - d.SetId(service.ID) + tags := make([]string, 0, len(service.Tags)) + for _, tag := range service.Tags { + tags = append(tags, tag) + } + d.Set("tags", tags) } return nil @@ -123,10 +127,10 @@ func resourceConsulAgentServiceDelete(d *schema.ResourceData, meta interface{}) client := meta.(*consulapi.Client) catalog := client.Agent() - name := d.Get("name").(string) + id := d.Get("id").(string) - if err := catalog.ServiceDeregister(name); err != nil { - return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", name, err) + if err := catalog.ServiceDeregister(id); err != nil { + return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", id, err) } // Clear the ID diff --git a/builtin/providers/consul/resource_consul_agent_service_test.go b/builtin/providers/consul/resource_consul_agent_service_test.go index 5d5b022d8..5150c4e85 100644 --- a/builtin/providers/consul/resource_consul_agent_service_test.go +++ b/builtin/providers/consul/resource_consul_agent_service_test.go @@ -22,6 +22,10 @@ func TestAccConsulAgentService_basic(t *testing.T) { testAccCheckConsulAgentServiceValue("consul_agent_service.app", "address", "www.google.com"), testAccCheckConsulAgentServiceValue("consul_agent_service.app", "id", "google"), testAccCheckConsulAgentServiceValue("consul_agent_service.app", "name", "google"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "port", "80"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.#", "2"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.0", "tag0"), + testAccCheckConsulAgentServiceValue("consul_agent_service.app", "tags.1", "tag1"), ), }, }, @@ -78,8 +82,9 @@ func testAccCheckConsulAgentServiceValue(n, attr, val string) resource.TestCheck const testAccConsulAgentServiceConfig = ` resource "consul_agent_service" "app" { - name = "google" address = "www.google.com" + name = "google" port = 80 + tags = ["tag0", "tag1"] } ` diff --git a/builtin/providers/consul/resource_consul_catalog_entry.go b/builtin/providers/consul/resource_consul_catalog_entry.go index 73ed90307..263c21f71 100644 --- a/builtin/providers/consul/resource_consul_catalog_entry.go +++ b/builtin/providers/consul/resource_consul_catalog_entry.go @@ -70,10 +70,11 @@ func resourceConsulCatalogEntry() *schema.Resource { }, "tags": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, + Set: resourceConsulCatalogEntryServiceTagsHash, }, }, }, @@ -88,6 +89,10 @@ func resourceConsulCatalogEntry() *schema.Resource { } } +func resourceConsulCatalogEntryServiceTagsHash(v interface{}) int { + return hashcode.String(v.(string)) +} + func resourceConsulCatalogEntryServicesHash(v interface{}) int { var buf 
bytes.Buffer m := v.(map[string]interface{}) @@ -96,7 +101,7 @@ func resourceConsulCatalogEntryServicesHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["address"].(string))) buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) if v, ok := m["tags"]; ok { - vs := v.([]interface{}) + vs := v.(*schema.Set).List() s := make([]string, len(vs)) for i, raw := range vs { s[i] = raw.(string) @@ -146,7 +151,7 @@ func resourceConsulCatalogEntryCreate(d *schema.ResourceData, meta interface{}) serviceIDs[i] = serviceID var tags []string - if v := serviceData["tags"].([]interface{}); len(v) > 0 { + if v := serviceData["tags"].(*schema.Set).List(); len(v) > 0 { tags = make([]string, len(v)) for i, raw := range v { tags[i] = raw.(string) diff --git a/builtin/providers/consul/resource_consul_catalog_entry_test.go b/builtin/providers/consul/resource_consul_catalog_entry_test.go new file mode 100644 index 000000000..0a28b675c --- /dev/null +++ b/builtin/providers/consul/resource_consul_catalog_entry_test.go @@ -0,0 +1,100 @@ +package consul + +import ( + "fmt" + "testing" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccConsulCatalogEntry_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() {}, + Providers: testAccProviders, + CheckDestroy: testAccCheckConsulCatalogEntryDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccConsulCatalogEntryConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckConsulCatalogEntryExists(), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "address", "127.0.0.1"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "node", "bastion"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.#", "1"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.address", "www.google.com"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.id", "google1"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.name", "google"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.port", "80"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.#", "2"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.2154398732", "tag0"), + testAccCheckConsulCatalogEntryValue("consul_catalog_entry.app", "service.3112399829.tags.4151227546", "tag1"), + ), + }, + }, + }) +} + +func testAccCheckConsulCatalogEntryDestroy(s *terraform.State) error { + catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() + qOpts := consulapi.QueryOptions{} + services, _, err := catalog.Services(&qOpts) + if err != nil { + return fmt.Errorf("Could not retrieve services: %#v", err) + } + _, ok := services["google"] + if ok { + return fmt.Errorf("Service still exists: %#v", "google") + } + return nil +} + +func testAccCheckConsulCatalogEntryExists() resource.TestCheckFunc { + return func(s *terraform.State) error { + catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() + qOpts := consulapi.QueryOptions{} + services, _, err := catalog.Services(&qOpts) + if err != nil { + return err + } + _, ok := services["google"] + if !ok { + return fmt.Errorf("Service does not exist: %#v", "google") + } + return nil + } +} + +func testAccCheckConsulCatalogEntryValue(n, attr, val 
string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rn, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found") + } + out, ok := rn.Primary.Attributes[attr] + if !ok { + return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) + } + if val != "" && out != val { + return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) + } + if val == "" && out == "" { + return fmt.Errorf("Attribute '%s' value '%s'", attr, out) + } + return nil + } +} + +const testAccConsulCatalogEntryConfig = ` +resource "consul_catalog_entry" "app" { + address = "127.0.0.1" + node = "bastion" + service = { + address = "www.google.com" + id = "google1" + name = "google" + port = 80 + tags = ["tag0", "tag1"] + } +} +` diff --git a/builtin/providers/consul/resource_provider.go b/builtin/providers/consul/resource_provider.go index b109b9fe8..08a153046 100644 --- a/builtin/providers/consul/resource_provider.go +++ b/builtin/providers/consul/resource_provider.go @@ -36,7 +36,7 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "consul_agent_service": resourceConsulAgentService(), "consul_catalog_entry": resourceConsulCatalogEntry(), - "consul_keys": resourceConsulKeys(), + "consul_keys": resourceConsulKeys(), }, ConfigureFunc: providerConfigure, diff --git a/website/source/docs/providers/consul/r/agent_service.html.markdown b/website/source/docs/providers/consul/r/agent_service.html.markdown new file mode 100644 index 000000000..edf7524dd --- /dev/null +++ b/website/source/docs/providers/consul/r/agent_service.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "consul" +page_title: "Consul: consul_agent_service" +sidebar_current: "docs-consul-resource-agent-service" +description: |- + Provides access to Agent Service data in Consul. This can be used to define a service associated with a particular agent. Currently, defining health checks for an agent service is not supported. +--- + +# consul\_agent\_service + +Provides access to Agent Service data in Consul. This can be used to define a service associated with a particular agent. Currently, defining health checks for an agent service is not supported. + +## Example Usage + +``` +resource "consul_agent_service" "app" { + address = "www.google.com" + name = "google" + port = 80 + tags = ["tag0", "tag1"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `address` - (Optional) The address of the service. Defaults to the + address of the agent. + +* `name` - (Required) The name of the service. + +* `port` - (Optional) The port of the service. + +* `tags` - (Optional) A list of values that are opaque to Consul, + but can be used to distinguish between services or nodes. + + +## Attributes Reference + +The following attributes are exported: + +* `address` - The address of the service. +* `id` - The id of the service, defaults to the value of `name`. +* `name` - The name of the service. +* `port` - The port of the service. +* `tags` - The tags of the service. 
diff --git a/website/source/docs/providers/consul/r/catalog_entry.html.markdown b/website/source/docs/providers/consul/r/catalog_entry.html.markdown new file mode 100644 index 000000000..d750642c5 --- /dev/null +++ b/website/source/docs/providers/consul/r/catalog_entry.html.markdown @@ -0,0 +1,58 @@ +--- +layout: "consul" +page_title: "Consul: consul_catalog_entry" +sidebar_current: "docs-consul-resource-catalog-entry" +description: |- + Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported. +--- + +# consul\_catalog\_entry_ + +Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported. + +## Example Usage + +``` +resource "consul_catalog_entry" "app" { + address = "192.168.10.10" + name = "foobar" + service = { + address = "127.0.0.1" + id = "redis1" + name = "redis" + port = 8000 + tags = ["master", "v1"] + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `address` - (Required) The address of the node being added to + or referenced in the catalog. + +* `node` - (Required) The name of the node being added to or + referenced in the catalog. + +* `service` - (Optional) A service to optionally associated with + the node. Supported values documented below. + +The `service` block supports the following: + +* `address` - (Optional) The address of the service. Defaults to the + node address. +* `id` - (Optional) The ID of the service. Defaults to the `name`. +* `name` - (Required) The name of the service +* `port` - (Optional) The port of the service. +* `tags` - (Optional) A list of values that are opaque to Consul, + but can be used to distinguish between services or nodes. + + +## Attributes Reference + +The following attributes are exported: + +* `address` - The address of the service. +* `node` - The id of the service, defaults to the value of `name`. From 8c8b58400ceb37cace0aef553bf6ab0d0bde77fa Mon Sep 17 00:00:00 2001 From: Max Englander Date: Mon, 25 Jan 2016 00:18:13 +0000 Subject: [PATCH 0008/1238] consul_catalog_entry docs: remove errant underscore --- .../source/docs/providers/consul/r/catalog_entry.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/consul/r/catalog_entry.html.markdown b/website/source/docs/providers/consul/r/catalog_entry.html.markdown index d750642c5..354301a33 100644 --- a/website/source/docs/providers/consul/r/catalog_entry.html.markdown +++ b/website/source/docs/providers/consul/r/catalog_entry.html.markdown @@ -6,7 +6,7 @@ description: |- Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported. --- -# consul\_catalog\_entry_ +# consul\_catalog\_entry Provides access to Catalog data in Consul. This can be used to define a node or a service. Currently, defining health checks is not supported. 
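The state keys exercised by the catalog-entry acceptance test above (`service.3112399829.address`, `service.3112399829.tags.2154398732`, and so on) are set indexes produced by resourceConsulCatalogEntryServicesHash and resourceConsulCatalogEntryServiceTagsHash. The following is a rough standalone sketch of how those indexes are derived from the fixture values; it assumes helper/hashcode.String is essentially a CRC-32 checksum of its input, so the printed numbers are illustrative rather than guaranteed to match the fixture exactly.

```go
package main

import (
	"fmt"
	"hash/crc32"
	"sort"
)

// hashString approximates terraform's helper/hashcode.String
// (assumed here to be a CRC-32 checksum; negative-value handling omitted).
func hashString(s string) int {
	return int(crc32.ChecksumIEEE([]byte(s)))
}

func main() {
	// Values taken from the testAccConsulCatalogEntryConfig fixture above.
	id, name, address, port := "google1", "google", "www.google.com", 80
	tags := []string{"tag0", "tag1"}

	// Index of the service block: id, name, address, port, then sorted tags,
	// each followed by a dash, mirroring resourceConsulCatalogEntryServicesHash.
	buf := fmt.Sprintf("%s-%s-%s-%d-", id, name, address, port)
	sorted := append([]string(nil), tags...)
	sort.Strings(sorted)
	for _, t := range sorted {
		buf += fmt.Sprintf("%s-", t)
	}
	fmt.Println("service block index:", hashString(buf))

	// Index of each tag within the nested set, mirroring
	// resourceConsulCatalogEntryServiceTagsHash (a hash of the bare tag string).
	for _, t := range tags {
		fmt.Println("tag index:", t, hashString(t))
	}
}
```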
From 296f8be10a4c50bcc2d5926e46748bc3b4167c0e Mon Sep 17 00:00:00 2001 From: Max Englander Date: Sun, 27 Mar 2016 03:58:12 +0000 Subject: [PATCH 0009/1238] #2087 add consul_node and consul_service resources --- .../providers/consul/resource_consul_node.go | 156 ++++++++++++++++++ .../consul/resource_consul_node_test.go | 87 ++++++++++ .../consul/resource_consul_service.go | 139 ++++++++++++++++ .../consul/resource_consul_service_test.go | 90 ++++++++++ builtin/providers/consul/resource_provider.go | 2 + 5 files changed, 474 insertions(+) create mode 100644 builtin/providers/consul/resource_consul_node.go create mode 100644 builtin/providers/consul/resource_consul_node_test.go create mode 100644 builtin/providers/consul/resource_consul_service.go create mode 100644 builtin/providers/consul/resource_consul_service_test.go diff --git a/builtin/providers/consul/resource_consul_node.go b/builtin/providers/consul/resource_consul_node.go new file mode 100644 index 000000000..c81544ccb --- /dev/null +++ b/builtin/providers/consul/resource_consul_node.go @@ -0,0 +1,156 @@ +package consul + +import ( + "fmt" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceConsulNode() *schema.Resource { + return &schema.Resource{ + Create: resourceConsulNodeCreate, + Update: resourceConsulNodeCreate, + Read: resourceConsulNodeRead, + Delete: resourceConsulNodeDelete, + + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "datacenter": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceConsulNodeCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + } else { + var err error + if dc, err = getDC(d, client); err != nil { + return err + } + } + + var token string + if v, ok := d.GetOk("token"); ok { + token = v.(string) + } + + // Setup the operations using the datacenter + wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} + + address := d.Get("address").(string) + name := d.Get("name").(string) + + registration := &consulapi.CatalogRegistration{ + Address: address, + Datacenter: dc, + Node: name, + } + + if _, err := catalog.Register(registration, &wOpts); err != nil { + return fmt.Errorf("Failed to register Consul catalog node with name '%s' at address '%s' in %s: %v", + name, address, dc, err) + } + + // Update the resource + qOpts := consulapi.QueryOptions{Datacenter: dc} + if _, _, err := catalog.Node(name, &qOpts); err != nil { + return fmt.Errorf("Failed to read Consul catalog node with name '%s' at address '%s' in %s: %v", + name, address, dc, err) + } else { + d.Set("datacenter", dc) + } + + d.SetId(fmt.Sprintf("%s-%s", name, address)) + + return nil +} + +func resourceConsulNodeRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + // Get the DC, error if not available. 
+ var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + } + + name := d.Get("name").(string) + + // Setup the operations using the datacenter + qOpts := consulapi.QueryOptions{Datacenter: dc} + + if _, _, err := catalog.Node(name, &qOpts); err != nil { + return fmt.Errorf("Failed to get name '%s' from Consul catalog: %v", name, err) + } + + return nil +} + +func resourceConsulNodeDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Catalog() + + var dc string + if v, ok := d.GetOk("datacenter"); ok { + dc = v.(string) + } else { + var err error + if dc, err = getDC(d, client); err != nil { + return err + } + } + + var token string + if v, ok := d.GetOk("token"); ok { + token = v.(string) + } + + // Setup the operations using the datacenter + wOpts := consulapi.WriteOptions{Datacenter: dc, Token: token} + + address := d.Get("address").(string) + name := d.Get("name").(string) + + deregistration := consulapi.CatalogDeregistration{ + Address: address, + Datacenter: dc, + Node: name, + } + + if _, err := catalog.Deregister(&deregistration, &wOpts); err != nil { + return fmt.Errorf("Failed to deregister Consul catalog node with name '%s' at address '%s' in %s: %v", + name, address, dc, err) + } + + // Clear the ID + d.SetId("") + return nil +} diff --git a/builtin/providers/consul/resource_consul_node_test.go b/builtin/providers/consul/resource_consul_node_test.go new file mode 100644 index 000000000..a24991d45 --- /dev/null +++ b/builtin/providers/consul/resource_consul_node_test.go @@ -0,0 +1,87 @@ +package consul + +import ( + "fmt" + "testing" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccConsulNode_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() {}, + Providers: testAccProviders, + CheckDestroy: testAccCheckConsulNodeDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccConsulNodeConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckConsulNodeExists(), + testAccCheckConsulNodeValue("consul_catalog_entry.app", "address", "127.0.0.1"), + testAccCheckConsulNodeValue("consul_catalog_entry.app", "name", "foo"), + ), + }, + }, + }) +} + +func testAccCheckConsulNodeDestroy(s *terraform.State) error { + catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() + qOpts := consulapi.QueryOptions{} + nodes, _, err := catalog.Nodes(&qOpts) + if err != nil { + return fmt.Errorf("Could not retrieve services: %#v", err) + } + for i := range nodes { + if nodes[i].Node == "foo" { + return fmt.Errorf("Node still exists: %#v", "foo") + } + } + return nil +} + +func testAccCheckConsulNodeExists() resource.TestCheckFunc { + return func(s *terraform.State) error { + catalog := testAccProvider.Meta().(*consulapi.Client).Catalog() + qOpts := consulapi.QueryOptions{} + nodes, _, err := catalog.Nodes(&qOpts) + if err != nil { + return err + } + for i := range nodes { + if nodes[i].Node == "foo" { + return nil + } + } + return fmt.Errorf("Service does not exist: %#v", "google") + } +} + +func testAccCheckConsulNodeValue(n, attr, val string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rn, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found") + } + out, ok := rn.Primary.Attributes[attr] + if !ok { + return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) + } + if val != "" && 
out != val { + return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) + } + if val == "" && out == "" { + return fmt.Errorf("Attribute '%s' value '%s'", attr, out) + } + return nil + } +} + +const testAccConsulNodeConfig = ` +resource "consul_catalog_entry" "foo" { + address = "127.0.0.1" + name = "foo" +} +` diff --git a/builtin/providers/consul/resource_consul_service.go b/builtin/providers/consul/resource_consul_service.go new file mode 100644 index 000000000..57f95a856 --- /dev/null +++ b/builtin/providers/consul/resource_consul_service.go @@ -0,0 +1,139 @@ +package consul + +import ( + "fmt" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceConsulService() *schema.Resource { + return &schema.Resource{ + Create: resourceConsulServiceCreate, + Update: resourceConsulServiceCreate, + Read: resourceConsulServiceRead, + Delete: resourceConsulServiceDelete, + + Schema: map[string]*schema.Schema{ + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + ForceNew: true, + }, + }, + } +} + +func resourceConsulServiceCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + agent := client.Agent() + + name := d.Get("name").(string) + registration := consulapi.AgentServiceRegistration{Name: name} + + if address, ok := d.GetOk("address"); ok { + registration.Address = address.(string) + } + + if port, ok := d.GetOk("port"); ok { + registration.Port = port.(int) + } + + if v, ok := d.GetOk("tags"); ok { + vs := v.([]interface{}) + s := make([]string, len(vs)) + for i, raw := range vs { + s[i] = raw.(string) + } + registration.Tags = s + } + + if err := agent.ServiceRegister(®istration); err != nil { + return fmt.Errorf("Failed to register service '%s' with Consul agent: %v", name, err) + } + + // Update the resource + if serviceMap, err := agent.Services(); err != nil { + return fmt.Errorf("Failed to read services from Consul agent: %v", err) + } else if service, ok := serviceMap[name]; !ok { + return fmt.Errorf("Failed to read service '%s' from Consul agent: %v", name, err) + } else { + d.Set("address", service.Address) + d.Set("id", service.ID) + d.SetId(service.ID) + d.Set("name", service.Service) + d.Set("port", service.Port) + tags := make([]string, 0, len(service.Tags)) + for _, tag := range service.Tags { + tags = append(tags, tag) + } + d.Set("tags", tags) + } + + return nil +} + +func resourceConsulServiceRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + agent := client.Agent() + + name := d.Get("name").(string) + + if services, err := agent.Services(); err != nil { + return fmt.Errorf("Failed to get services from Consul agent: %v", err) + } else if service, ok := services[name]; !ok { + return fmt.Errorf("Failed to get service '%s' from Consul agent", name) + } else { + d.Set("address", service.Address) + d.Set("id", service.ID) + d.SetId(service.ID) + d.Set("name", service.Service) + d.Set("port", service.Port) + tags := make([]string, 0, len(service.Tags)) + for _, tag := range service.Tags { + tags = 
append(tags, tag) + } + d.Set("tags", tags) + } + + return nil +} + +func resourceConsulServiceDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*consulapi.Client) + catalog := client.Agent() + + id := d.Get("id").(string) + + if err := catalog.ServiceDeregister(id); err != nil { + return fmt.Errorf("Failed to deregister service '%s' from Consul agent: %v", id, err) + } + + // Clear the ID + d.SetId("") + return nil +} diff --git a/builtin/providers/consul/resource_consul_service_test.go b/builtin/providers/consul/resource_consul_service_test.go new file mode 100644 index 000000000..f4df71542 --- /dev/null +++ b/builtin/providers/consul/resource_consul_service_test.go @@ -0,0 +1,90 @@ +package consul + +import ( + "fmt" + "testing" + + consulapi "github.com/hashicorp/consul/api" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccConsulService_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() {}, + Providers: testAccProviders, + CheckDestroy: testAccCheckConsulServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccConsulServiceConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckConsulServiceExists(), + testAccCheckConsulServiceValue("consul_service.app", "address", "www.google.com"), + testAccCheckConsulServiceValue("consul_service.app", "id", "google"), + testAccCheckConsulServiceValue("consul_service.app", "name", "google"), + testAccCheckConsulServiceValue("consul_service.app", "port", "80"), + testAccCheckConsulServiceValue("consul_service.app", "tags.#", "2"), + testAccCheckConsulServiceValue("consul_service.app", "tags.0", "tag0"), + testAccCheckConsulServiceValue("consul_service.app", "tags.1", "tag1"), + ), + }, + }, + }) +} + +func testAccCheckConsulServiceDestroy(s *terraform.State) error { + agent := testAccProvider.Meta().(*consulapi.Client).Agent() + services, err := agent.Services() + if err != nil { + return fmt.Errorf("Could not retrieve services: %#v", err) + } + _, ok := services["google"] + if ok { + return fmt.Errorf("Service still exists: %#v", "google") + } + return nil +} + +func testAccCheckConsulServiceExists() resource.TestCheckFunc { + return func(s *terraform.State) error { + agent := testAccProvider.Meta().(*consulapi.Client).Agent() + services, err := agent.Services() + if err != nil { + return err + } + _, ok := services["google"] + if !ok { + return fmt.Errorf("Service does not exist: %#v", "google") + } + return nil + } +} + +func testAccCheckConsulServiceValue(n, attr, val string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rn, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Resource not found") + } + out, ok := rn.Primary.Attributes[attr] + if !ok { + return fmt.Errorf("Attribute '%s' not found: %#v", attr, rn.Primary.Attributes) + } + if val != "" && out != val { + return fmt.Errorf("Attribute '%s' value '%s' != '%s'", attr, out, val) + } + if val == "" && out == "" { + return fmt.Errorf("Attribute '%s' value '%s'", attr, out) + } + return nil + } +} + +const testAccConsulServiceConfig = ` +resource "consul_service" "app" { + address = "www.google.com" + name = "google" + port = 80 + tags = ["tag0", "tag1"] +} +` diff --git a/builtin/providers/consul/resource_provider.go b/builtin/providers/consul/resource_provider.go index 08a153046..b1d090f11 100644 --- a/builtin/providers/consul/resource_provider.go +++ b/builtin/providers/consul/resource_provider.go @@ 
-37,6 +37,8 @@ func Provider() terraform.ResourceProvider { "consul_agent_service": resourceConsulAgentService(), "consul_catalog_entry": resourceConsulCatalogEntry(), "consul_keys": resourceConsulKeys(), + "consul_node": resourceConsulNode(), + "consul_service": resourceConsulService(), }, ConfigureFunc: providerConfigure, From 4fe7db9441f5d9638667587af57124842f4906ca Mon Sep 17 00:00:00 2001 From: Max Englander Date: Sun, 27 Mar 2016 04:12:38 +0000 Subject: [PATCH 0010/1238] #2087 add docs for consul_node and consul_service --- .../providers/consul/r/node.html.markdown | 37 +++++++++++++++ .../providers/consul/r/service.html.markdown | 47 +++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 website/source/docs/providers/consul/r/node.html.markdown create mode 100644 website/source/docs/providers/consul/r/service.html.markdown diff --git a/website/source/docs/providers/consul/r/node.html.markdown b/website/source/docs/providers/consul/r/node.html.markdown new file mode 100644 index 000000000..d8cc322bb --- /dev/null +++ b/website/source/docs/providers/consul/r/node.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "consul" +page_title: "Consul: consul_node" +sidebar_current: "docs-consul-resource-node" +description: |- + Provides access to Node data in Consul. This can be used to define a node. +--- + +# consul\_node + +Provides access to Node data in Consul. This can be used to define a node. Currently, defining health checks is not supported. + +## Example Usage + +``` +resource "consul_node" "foobar" { + address = "192.168.10.10" + name = "foobar" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `address` - (Required) The address of the node being added to + or referenced in the catalog. + +* `name` - (Required) The name of the node being added to or + referenced in the catalog. + +## Attributes Reference + +The following attributes are exported: + +* `address` - The address of the service. +* `name` - The name of the service. diff --git a/website/source/docs/providers/consul/r/service.html.markdown b/website/source/docs/providers/consul/r/service.html.markdown new file mode 100644 index 000000000..a91370c2c --- /dev/null +++ b/website/source/docs/providers/consul/r/service.html.markdown @@ -0,0 +1,47 @@ +--- +layout: "consul" +page_title: "Consul: consul_service" +sidebar_current: "docs-consul-resource-service" +description: |- + A high-level resource for creating a Service in Consul. Since Consul requires clients to register services with either the catalog or an agent, `consul_service` may register with either the catalog or an agent, depending on the configuration of `consul_service`. For now, `consul_service` always registers services with the agent running at the address defined in the `consul` resource. Health checks are not currently supported. +--- + +# consul\_service + +A high-level resource for creating a Service in Consul. Currently, defining health checks for a service is not supported. + +## Example Usage + +``` +resource "consul_service" "google" { + address = "www.google.com" + name = "google" + port = 80 + tags = ["tag0", "tag1"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `address` - (Optional) The address of the service. Defaults to the + address of the agent. + +* `name` - (Required) The name of the service. + +* `port` - (Optional) The port of the service. + +* `tags` - (Optional) A list of values that are opaque to Consul, + but can be used to distinguish between services or nodes. 
+ + +## Attributes Reference + +The following attributes are exported: + +* `address` - The address of the service. +* `id` - The id of the service, defaults to the value of `name`. +* `name` - The name of the service. +* `port` - The port of the service. +* `tags` - The tags of the service. From 03baf3e5a54faffa11b6263e72b9c5e225b912a3 Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 30 Mar 2016 20:13:28 -0500 Subject: [PATCH 0011/1238] provider/aws: Allow `aws_redshift_security_group` ingress rules to change --- .../resource_aws_redshift_security_group.go | 107 ++++++++- ...source_aws_redshift_security_group_test.go | 221 ++++++++++++++++++ 2 files changed, 327 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go index 8393e647b..92c3c9666 100644 --- a/builtin/providers/aws/resource_aws_redshift_security_group.go +++ b/builtin/providers/aws/resource_aws_redshift_security_group.go @@ -20,6 +20,7 @@ func resourceAwsRedshiftSecurityGroup() *schema.Resource { return &schema.Resource{ Create: resourceAwsRedshiftSecurityGroupCreate, Read: resourceAwsRedshiftSecurityGroupRead, + Update: resourceAwsRedshiftSecurityGroupUpdate, Delete: resourceAwsRedshiftSecurityGroupDelete, Schema: map[string]*schema.Schema{ @@ -39,7 +40,6 @@ func resourceAwsRedshiftSecurityGroup() *schema.Resource { "ingress": &schema.Schema{ Type: schema.TypeSet, Required: true, - ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "cidr": &schema.Schema{ @@ -150,6 +150,55 @@ func resourceAwsRedshiftSecurityGroupRead(d *schema.ResourceData, meta interface return nil } +func resourceAwsRedshiftSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).redshiftconn + + if d.HasChange("ingress") { + o, n := d.GetChange("ingress") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + removeIngressRules, err := expandRedshiftSGRevokeIngress(os.Difference(ns).List()) + if err != nil { + return err + } + if len(removeIngressRules) > 0 { + for _, r := range removeIngressRules { + r.ClusterSecurityGroupName = aws.String(d.Id()) + + _, err := conn.RevokeClusterSecurityGroupIngress(&r) + if err != nil { + return err + } + } + } + + addIngressRules, err := expandRedshiftSGAuthorizeIngress(ns.Difference(os).List()) + if err != nil { + return err + } + if len(addIngressRules) > 0 { + for _, r := range addIngressRules { + r.ClusterSecurityGroupName = aws.String(d.Id()) + + _, err := conn.AuthorizeClusterSecurityGroupIngress(&r) + if err != nil { + return err + } + } + } + + } + return resourceAwsRedshiftSecurityGroupRead(d, meta) +} + func resourceAwsRedshiftSecurityGroupDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).redshiftconn @@ -289,3 +338,59 @@ func resourceAwsRedshiftSecurityGroupStateRefreshFunc( return v, "authorized", nil } } + +func expandRedshiftSGAuthorizeIngress(configured []interface{}) ([]redshift.AuthorizeClusterSecurityGroupIngressInput, error) { + var ingress []redshift.AuthorizeClusterSecurityGroupIngressInput + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatabile objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + i := redshift.AuthorizeClusterSecurityGroupIngressInput{} + + if v, ok := data["cidr"]; ok { + i.CIDRIP = aws.String(v.(string)) + } + + if v, 
ok := data["security_group_name"]; ok { + i.EC2SecurityGroupName = aws.String(v.(string)) + } + + if v, ok := data["security_group_owner_id"]; ok { + i.EC2SecurityGroupOwnerId = aws.String(v.(string)) + } + + ingress = append(ingress, i) + } + + return ingress, nil +} + +func expandRedshiftSGRevokeIngress(configured []interface{}) ([]redshift.RevokeClusterSecurityGroupIngressInput, error) { + var ingress []redshift.RevokeClusterSecurityGroupIngressInput + + // Loop over our configured parameters and create + // an array of aws-sdk-go compatabile objects + for _, pRaw := range configured { + data := pRaw.(map[string]interface{}) + + i := redshift.RevokeClusterSecurityGroupIngressInput{} + + if v, ok := data["cidr"]; ok { + i.CIDRIP = aws.String(v.(string)) + } + + if v, ok := data["security_group_name"]; ok { + i.EC2SecurityGroupName = aws.String(v.(string)) + } + + if v, ok := data["security_group_owner_id"]; ok { + i.EC2SecurityGroupOwnerId = aws.String(v.(string)) + } + + ingress = append(ingress, i) + } + + return ingress, nil +} diff --git a/builtin/providers/aws/resource_aws_redshift_security_group_test.go b/builtin/providers/aws/resource_aws_redshift_security_group_test.go index 4fc3bdbe5..739f7f07a 100644 --- a/builtin/providers/aws/resource_aws_redshift_security_group_test.go +++ b/builtin/providers/aws/resource_aws_redshift_security_group_test.go @@ -37,6 +37,44 @@ func TestAccAWSRedshiftSecurityGroup_ingressCidr(t *testing.T) { }) } +func TestAccAWSRedshiftSecurityGroup_updateIngressCidr(t *testing.T) { + var v redshift.ClusterSecurityGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", "1"), + ), + }, + + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", "3"), + ), + }, + + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", "2"), + ), + }, + }, + }) +} + func TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup(t *testing.T) { var v redshift.ClusterSecurityGroup @@ -63,6 +101,44 @@ func TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup(t *testing.T) { }) } +func TestAccAWSRedshiftSecurityGroup_updateIngressSecurityGroup(t *testing.T) { + var v redshift.ClusterSecurityGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgId, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", 
"1"), + ), + }, + + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", "3"), + ), + }, + + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftSecurityGroupExists("aws_redshift_security_group.bar", &v), + resource.TestCheckResourceAttr( + "aws_redshift_security_group.bar", "ingress.#", "2"), + ), + }, + }, + }) +} + func testAccCheckAWSRedshiftSecurityGroupExists(n string, v *redshift.ClusterSecurityGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -177,6 +253,46 @@ resource "aws_redshift_security_group" "bar" { } }` +const testAccAWSRedshiftSecurityGroupConfig_ingressCidrAdd = ` +provider "aws" { + region = "us-east-1" +} + +resource "aws_redshift_security_group" "bar" { + name = "redshift-sg-terraform" + description = "this is a description" + + ingress { + cidr = "10.0.0.1/24" + } + + ingress { + cidr = "10.0.10.1/24" + } + + ingress { + cidr = "10.0.20.1/24" + } +}` + +const testAccAWSRedshiftSecurityGroupConfig_ingressCidrReduce = ` +provider "aws" { + region = "us-east-1" +} + +resource "aws_redshift_security_group" "bar" { + name = "redshift-sg-terraform" + description = "this is a description" + + ingress { + cidr = "10.0.0.1/24" + } + + ingress { + cidr = "10.0.10.1/24" + } +}` + const testAccAWSRedshiftSecurityGroupConfig_ingressSgId = ` provider "aws" { region = "us-east-1" @@ -203,3 +319,108 @@ resource "aws_redshift_security_group" "bar" { security_group_owner_id = "${aws_security_group.redshift.owner_id}" } }` + +const testAccAWSRedshiftSecurityGroupConfig_ingressSgIdAdd = ` +provider "aws" { + region = "us-east-1" +} + +resource "aws_security_group" "redshift" { + name = "terraform_redshift_acceptance_test" + description = "Used in the redshift acceptance tests" + + ingress { + protocol = "tcp" + from_port = 22 + to_port = 22 + cidr_blocks = ["10.0.0.0/8"] + } +} + +resource "aws_security_group" "redshift2" { + name = "terraform_redshift_acceptance_test_2" + description = "Used in the redshift acceptance tests #2" + + ingress { + protocol = "tcp" + from_port = 22 + to_port = 22 + cidr_blocks = ["10.0.10.0/8"] + } +} + +resource "aws_security_group" "redshift3" { + name = "terraform_redshift_acceptance_test_3" + description = "Used in the redshift acceptance tests #3" + + ingress { + protocol = "tcp" + from_port = 22 + to_port = 22 + cidr_blocks = ["10.0.20.0/8"] + } +} + +resource "aws_redshift_security_group" "bar" { + name = "redshift-sg-terraform" + description = "this is a description" + + ingress { + security_group_name = "${aws_security_group.redshift.name}" + security_group_owner_id = "${aws_security_group.redshift.owner_id}" + } + + ingress { + security_group_name = "${aws_security_group.redshift2.name}" + security_group_owner_id = "${aws_security_group.redshift.owner_id}" + } + + ingress { + security_group_name = "${aws_security_group.redshift3.name}" + security_group_owner_id = "${aws_security_group.redshift.owner_id}" + } +}` + +const testAccAWSRedshiftSecurityGroupConfig_ingressSgIdReduce = ` +provider "aws" { + region = "us-east-1" +} + +resource "aws_security_group" "redshift" { + name = "terraform_redshift_acceptance_test" + description = "Used in the redshift 
acceptance tests" + + ingress { + protocol = "tcp" + from_port = 22 + to_port = 22 + cidr_blocks = ["10.0.0.0/8"] + } +} + +resource "aws_security_group" "redshift2" { + name = "terraform_redshift_acceptance_test_2" + description = "Used in the redshift acceptance tests #2" + + ingress { + protocol = "tcp" + from_port = 22 + to_port = 22 + cidr_blocks = ["10.0.10.0/8"] + } +} + +resource "aws_redshift_security_group" "bar" { + name = "redshift-sg-terraform" + description = "this is a description" + + ingress { + security_group_name = "${aws_security_group.redshift.name}" + security_group_owner_id = "${aws_security_group.redshift.owner_id}" + } + + ingress { + security_group_name = "${aws_security_group.redshift2.name}" + security_group_owner_id = "${aws_security_group.redshift.owner_id}" + } +}` From 0315d511250a13766590c5e7bad3c4b4bb22c50f Mon Sep 17 00:00:00 2001 From: Mark Severson Date: Tue, 17 May 2016 14:23:05 -0600 Subject: [PATCH 0012/1238] provider/aws: Query all pages of group membership By default, group membership queries return pages of 100 users at a time. Because of this, if there are more than 100 users in an aws_iam_group_membership resource, the resource always reports as needing to be changed (because it only sees 100 of the users as existing in the group). The group membership now queries all pages. Fixes #6722 --- .../aws/resource_aws_iam_group_membership.go | 40 ++++++++++++------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/builtin/providers/aws/resource_aws_iam_group_membership.go b/builtin/providers/aws/resource_aws_iam_group_membership.go index 14bdd3713..7977bbfb7 100644 --- a/builtin/providers/aws/resource_aws_iam_group_membership.go +++ b/builtin/providers/aws/resource_aws_iam_group_membership.go @@ -56,25 +56,35 @@ func resourceAwsIamGroupMembershipCreate(d *schema.ResourceData, meta interface{ func resourceAwsIamGroupMembershipRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).iamconn group := d.Get("group").(string) - resp, err := conn.GetGroup(&iam.GetGroupInput{ - GroupName: aws.String(group), - }) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // aws specific error - if awsErr.Code() == "NoSuchEntity" { - // group not found - d.SetId("") - return nil + var ul []string + var marker *string + for { + resp, err := conn.GetGroup(&iam.GetGroupInput{ + GroupName: aws.String(group), + Marker: marker, + }) + + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + // aws specific error + if awsErr.Code() == "NoSuchEntity" { + // group not found + d.SetId("") + return nil + } } + return err } - return err - } - ul := make([]string, 0, len(resp.Users)) - for _, u := range resp.Users { - ul = append(ul, *u.UserName) + for _, u := range resp.Users { + ul = append(ul, *u.UserName) + } + + if !*resp.IsTruncated { + break + } + marker = resp.Marker } if err := d.Set("users", ul); err != nil { From efdee645bfacac191d478d4c5e0ddd3c22589356 Mon Sep 17 00:00:00 2001 From: Thibault Vigouroux Date: Tue, 24 May 2016 15:52:38 +0200 Subject: [PATCH 0013/1238] Handled case when instanceId is absent in network interfaces --- builtin/providers/aws/structure.go | 4 +++- builtin/providers/aws/structure_test.go | 17 +++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 1df36291a..bed05e92c 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -719,7 +719,9 @@ func 
expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecific //Flattens network interface attachment into a map[string]interface func flattenAttachment(a *ec2.NetworkInterfaceAttachment) map[string]interface{} { att := make(map[string]interface{}) - att["instance"] = *a.InstanceId + if a.InstanceId != nil { + att["instance"] = *a.InstanceId + } att["device_index"] = *a.DeviceIndex att["attachment_id"] = *a.AttachmentId return att diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index 80b3711aa..63666eefc 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -756,6 +756,23 @@ func TestFlattenAttachment(t *testing.T) { } } +func TestFlattenAttachmentWhenNoInstanceId(t *testing.T) { + expanded := &ec2.NetworkInterfaceAttachment{ + DeviceIndex: aws.Int64(int64(1)), + AttachmentId: aws.String("at-002"), + } + + result := flattenAttachment(expanded) + + if result == nil { + t.Fatal("expected result to have value, but got nil") + } + + if result["instance"] != nil { + t.Fatalf("expected instance to be nil, but got %s", result["instance"]) + } +} + func TestflattenStepAdjustments(t *testing.T) { expanded := []*autoscaling.StepAdjustment{ &autoscaling.StepAdjustment{ From c62dc3f72fad1bfb026c6f0487bcfafd96903300 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 28 May 2016 23:46:33 +0000 Subject: [PATCH 0014/1238] provider/openstack: Disassociate Monitors from Pool Before Deletion This commit ensures that all monitors have been disassociated from the load balancing pool before the pool is deleted. A test has been added to ensure that a full load balancing stack is capable of handling an update to an instance, causing some components to be rebuilt. 
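
As a usage illustration of the scenario this change addresses (a sketch only: the resource labels and values below are illustrative, and a subnet resource named subnet_1 is assumed to exist elsewhere, as in the acceptance test), consider a pool with a monitor attached through monitor_ids:

    # Health monitor to be associated with the pool
    resource "openstack_lb_monitor_v1" "monitor_1" {
      type        = "TCP"
      delay       = 30
      timeout     = 5
      max_retries = 3
    }

    # Pool that references the monitor; deleting this pool now
    # disassociates monitor_1 first, then removes the pool.
    resource "openstack_lb_pool_v1" "pool_1" {
      name        = "pool_1"
      protocol    = "TCP"
      subnet_id   = "${openstack_networking_subnet_v2.subnet_1.id}"
      lb_method   = "ROUND_ROBIN"
      monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
    }

With this change, destroying or rebuilding pool_1 walks the monitor_ids list and disassociates each monitor from the pool before issuing the pool delete, instead of attempting to delete the pool while a monitor is still attached.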
--- .../resource_openstack_lb_pool_v1.go | 13 +++ .../resource_openstack_lb_pool_v1_test.go | 104 +++++++++++++++++- 2 files changed, 115 insertions(+), 2 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go index 345d2b166..136b73a42 100644 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go @@ -299,6 +299,19 @@ func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error creating OpenStack networking client: %s", err) } + // Make sure all monitors are disassociated first + if v, ok := d.GetOk("monitor_ids"); ok { + if monitorIDList, ok := v.([]interface{}); ok { + for _, monitorID := range monitorIDList { + mID := monitorID.(string) + log.Printf("[DEBUG] Attempting to disassociate monitor %s from pool %s", mID, d.Id()) + if res := pools.DisassociateMonitor(networkingClient, d.Id(), mID); res.Err != nil { + return fmt.Errorf("Error disassociating monitor %s from pool %s: %s", mID, d.Id(), err) + } + } + } + } + stateConf := &resource.StateChangeConf{ Pending: []string{"ACTIVE", "PENDING_DELETE"}, Target: []string{"DELETED"}, diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go index 8264a32c9..3608db2e6 100644 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go @@ -56,7 +56,20 @@ func TestAccLBV1Pool_fullstack(t *testing.T) { CheckDestroy: testAccCheckLBV1PoolDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccLBV1Pool_fullstack, + Config: testAccLBV1Pool_fullstack_1, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network), + testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet), + testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.secgroup_1", &secgroup), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance1), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_2", &instance2), + testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool), + testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor), + testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip), + ), + }, + resource.TestStep{ + Config: testAccLBV1Pool_fullstack_2, Check: resource.ComposeTestCheckFunc( testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.network_1", &network), testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet), @@ -172,7 +185,7 @@ var testAccLBV1Pool_update = fmt.Sprintf(` }`, OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME) -var testAccLBV1Pool_fullstack = fmt.Sprintf(` +var testAccLBV1Pool_fullstack_1 = fmt.Sprintf(` resource "openstack_networking_network_v2" "network_1" { name = "network_1" admin_state_up = "true" @@ -257,3 +270,90 @@ var testAccLBV1Pool_fullstack = fmt.Sprintf(` pool_id = "${openstack_lb_pool_v1.pool_1.id}" admin_state_up = true }`) + +var testAccLBV1Pool_fullstack_2 = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + network_id = 
"${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_compute_secgroup_v2" "secgroup_1" { + name = "secgroup_1" + description = "Rules for secgroup_1" + + rule { + from_port = -1 + to_port = -1 + ip_protocol = "icmp" + cidr = "0.0.0.0/0" + } + + rule { + from_port = 80 + to_port = 80 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } + } + + resource "openstack_compute_instance_v2" "instance_1" { + name = "instance_1" + security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] + network { + uuid = "${openstack_networking_network_v2.network_1.id}" + } + } + + resource "openstack_compute_instance_v2" "instance_2" { + name = "instance_2" + security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"] + user_data = "#cloud-config\ndisable_root: false" + network { + uuid = "${openstack_networking_network_v2.network_1.id}" + } + } + + resource "openstack_lb_monitor_v1" "monitor_1" { + type = "TCP" + delay = 30 + timeout = 5 + max_retries = 3 + admin_state_up = "true" + } + + resource "openstack_lb_pool_v1" "pool_1" { + name = "pool_1" + protocol = "TCP" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + lb_method = "ROUND_ROBIN" + monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"] + } + + resource "openstack_lb_member_v1" "member_1" { + pool_id = "${openstack_lb_pool_v1.pool_1.id}" + address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}" + port = 80 + admin_state_up = true + } + + resource "openstack_lb_member_v1" "member_2" { + pool_id = "${openstack_lb_pool_v1.pool_1.id}" + address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}" + port = 80 + admin_state_up = true + } + + resource "openstack_lb_vip_v1" "vip_1" { + name = "vip_1" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + protocol = "TCP" + port = 80 + pool_id = "${openstack_lb_pool_v1.pool_1.id}" + admin_state_up = true + }`) From dd5f121494f35355bb26f3c183c13f82c4e0a973 Mon Sep 17 00:00:00 2001 From: Joakim Sernbrant Date: Wed, 8 Jun 2016 10:23:14 +0200 Subject: [PATCH 0015/1238] provider/cloudstack: add root_disk_size --- .../cloudstack/resource_cloudstack_instance.go | 11 +++++++++++ .../providers/cloudstack/r/instance.html.markdown | 4 ++++ 2 files changed, 15 insertions(+) diff --git a/builtin/providers/cloudstack/resource_cloudstack_instance.go b/builtin/providers/cloudstack/resource_cloudstack_instance.go index aa894f517..3e2c7d463 100644 --- a/builtin/providers/cloudstack/resource_cloudstack_instance.go +++ b/builtin/providers/cloudstack/resource_cloudstack_instance.go @@ -124,6 +124,12 @@ func resourceCloudStackInstance() *schema.Resource { Optional: true, Computed: true, }, + + "root_disk_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, }, } } @@ -252,6 +258,11 @@ func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{}) p.SetGroup(group.(string)) } + // If there is a root_disk_size supplied, add it to the parameter struct + if rootdisksize, ok := d.GetOk("root_disk_size"); ok { + p.SetRootdisksize(int64(rootdisksize.(int))) + } + // Create the new instance r, err := cs.VirtualMachine.DeployVirtualMachine(p) if err != nil { diff --git a/website/source/docs/providers/cloudstack/r/instance.html.markdown b/website/source/docs/providers/cloudstack/r/instance.html.markdown index bc8f6a2d5..9ed4a9db5 100644 --- a/website/source/docs/providers/cloudstack/r/instance.html.markdown +++ 
b/website/source/docs/providers/cloudstack/r/instance.html.markdown @@ -66,6 +66,10 @@ The following arguments are supported: * `expunge` - (Optional) This determines if the instance is expunged when it is destroyed (defaults false) +* `root_disk_size` - (Optional) The size of the root disk in + gigabytes. The root disk is resized on deploy. Only applies to + template-based deployments. + ## Attributes Reference The following attributes are exported: From ad9a3fe44db43b71e1ba057ef1204c5116a63b74 Mon Sep 17 00:00:00 2001 From: Igor Wiedler Date: Mon, 6 Jun 2016 19:35:13 +0200 Subject: [PATCH 0016/1238] [provider/google] Use resource-specific project when waiting for creation Creating most google cloud resources uses the compute_operation to wait for the creation to complete. However, the computeOperationWait* functions always uses the global `config.Project`, instead of the resource- specific one. This means that creating resource in a project other than the main one fails with a 404 on the operation resource. This patch uses the project from google_compute_instance instead of the global one. --- builtin/providers/google/compute_operation.go | 16 ++++++++-------- .../providers/google/resource_compute_address.go | 4 ++-- .../google/resource_compute_autoscaler.go | 6 +++--- .../google/resource_compute_backend_service.go | 6 +++--- .../providers/google/resource_compute_disk.go | 4 ++-- .../google/resource_compute_firewall.go | 6 +++--- .../google/resource_compute_forwarding_rule.go | 6 +++--- .../google/resource_compute_global_address.go | 4 ++-- .../resource_compute_global_forwarding_rule.go | 6 +++--- .../google/resource_compute_http_health_check.go | 6 +++--- .../resource_compute_https_health_check.go | 6 +++--- .../google/resource_compute_instance.go | 16 +++++++++------- .../google/resource_compute_instance_group.go | 12 ++++++------ .../resource_compute_instance_group_manager.go | 16 ++++++++-------- .../google/resource_compute_instance_template.go | 4 ++-- .../providers/google/resource_compute_network.go | 4 ++-- .../google/resource_compute_project_metadata.go | 6 +++--- .../providers/google/resource_compute_route.go | 4 ++-- .../google/resource_compute_ssl_certificate.go | 4 ++-- .../google/resource_compute_subnetwork.go | 4 ++-- .../google/resource_compute_target_http_proxy.go | 6 +++--- .../resource_compute_target_https_proxy.go | 8 ++++---- .../google/resource_compute_target_pool.go | 14 +++++++------- .../providers/google/resource_compute_url_map.go | 6 +++--- .../google/resource_compute_vpn_gateway.go | 4 ++-- .../google/resource_compute_vpn_tunnel.go | 4 ++-- 26 files changed, 92 insertions(+), 90 deletions(-) diff --git a/builtin/providers/google/compute_operation.go b/builtin/providers/google/compute_operation.go index ab76895e8..edbd753dc 100644 --- a/builtin/providers/google/compute_operation.go +++ b/builtin/providers/google/compute_operation.go @@ -82,11 +82,11 @@ func (e ComputeOperationError) Error() string { return buf.String() } -func computeOperationWaitGlobal(config *Config, op *compute.Operation, activity string) error { +func computeOperationWaitGlobal(config *Config, op *compute.Operation, project string, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Type: ComputeOperationWaitGlobal, } @@ -107,11 +107,11 @@ func computeOperationWaitGlobal(config *Config, op *compute.Operation, activity return nil } -func computeOperationWaitRegion(config *Config, op *compute.Operation, 
region, activity string) error { +func computeOperationWaitRegion(config *Config, op *compute.Operation, project string, region, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Type: ComputeOperationWaitRegion, Region: region, } @@ -133,15 +133,15 @@ func computeOperationWaitRegion(config *Config, op *compute.Operation, region, a return nil } -func computeOperationWaitZone(config *Config, op *compute.Operation, zone, activity string) error { - return computeOperationWaitZoneTime(config, op, zone, 4, activity) +func computeOperationWaitZone(config *Config, op *compute.Operation, project string, zone, activity string) error { + return computeOperationWaitZoneTime(config, op, project, zone, 4, activity) } -func computeOperationWaitZoneTime(config *Config, op *compute.Operation, zone string, minutes int, activity string) error { +func computeOperationWaitZoneTime(config *Config, op *compute.Operation, project string, zone string, minutes int, activity string) error { w := &ComputeOperationWaiter{ Service: config.clientCompute, Op: op, - Project: config.Project, + Project: project, Zone: zone, Type: ComputeOperationWaitZone, } diff --git a/builtin/providers/google/resource_compute_address.go b/builtin/providers/google/resource_compute_address.go index 427f24610..d4c962230 100644 --- a/builtin/providers/google/resource_compute_address.go +++ b/builtin/providers/google/resource_compute_address.go @@ -71,7 +71,7 @@ func resourceComputeAddressCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so store the ID now d.SetId(addr.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Address") + err = computeOperationWaitRegion(config, op, project, region, "Creating Address") if err != nil { return err } @@ -133,7 +133,7 @@ func resourceComputeAddressDelete(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error deleting address: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Address") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Address") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_autoscaler.go b/builtin/providers/google/resource_compute_autoscaler.go index cb6834b57..0afb83e38 100644 --- a/builtin/providers/google/resource_compute_autoscaler.go +++ b/builtin/providers/google/resource_compute_autoscaler.go @@ -233,7 +233,7 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(scaler.Name) - err = computeOperationWaitZone(config, op, zone.Name, "Creating Autoscaler") + err = computeOperationWaitZone(config, op, project, zone.Name, "Creating Autoscaler") if err != nil { return err } @@ -293,7 +293,7 @@ func resourceComputeAutoscalerUpdate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(scaler.Name) - err = computeOperationWaitZone(config, op, zone, "Updating Autoscaler") + err = computeOperationWaitZone(config, op, project, zone, "Updating Autoscaler") if err != nil { return err } @@ -316,7 +316,7 @@ func resourceComputeAutoscalerDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting autoscaler: %s", err) } - err = computeOperationWaitZone(config, op, zone, "Deleting Autoscaler") + err = computeOperationWaitZone(config, op, project, zone, "Deleting Autoscaler") if err != nil { return 
err } diff --git a/builtin/providers/google/resource_compute_backend_service.go b/builtin/providers/google/resource_compute_backend_service.go index 94bc23439..2e2923e34 100644 --- a/builtin/providers/google/resource_compute_backend_service.go +++ b/builtin/providers/google/resource_compute_backend_service.go @@ -181,7 +181,7 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, "Creating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Creating Backend Service") if err != nil { return err } @@ -269,7 +269,7 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ d.SetId(service.Name) - err = computeOperationWaitGlobal(config, op, "Updating Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Updating Backend Service") if err != nil { return err } @@ -292,7 +292,7 @@ func resourceComputeBackendServiceDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting backend service: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Backend Service") + err = computeOperationWaitGlobal(config, op, project, "Deleting Backend Service") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_disk.go b/builtin/providers/google/resource_compute_disk.go index c6811deb2..5984383f7 100644 --- a/builtin/providers/google/resource_compute_disk.go +++ b/builtin/providers/google/resource_compute_disk.go @@ -138,7 +138,7 @@ func resourceComputeDiskCreate(d *schema.ResourceData, meta interface{}) error { // It probably maybe worked, so store the ID now d.SetId(disk.Name) - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating Disk") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating Disk") if err != nil { return err } @@ -194,7 +194,7 @@ func resourceComputeDiskDelete(d *schema.ResourceData, meta interface{}) error { } zone := d.Get("zone").(string) - err = computeOperationWaitZone(config, op, zone, "Creating Disk") + err = computeOperationWaitZone(config, op, project, zone, "Creating Disk") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_firewall.go b/builtin/providers/google/resource_compute_firewall.go index a4776c34d..d5a8ef210 100644 --- a/builtin/providers/google/resource_compute_firewall.go +++ b/builtin/providers/google/resource_compute_firewall.go @@ -138,7 +138,7 @@ func resourceComputeFirewallCreate(d *schema.ResourceData, meta interface{}) err // It probably maybe worked, so store the ID now d.SetId(firewall.Name) - err = computeOperationWaitGlobal(config, op, "Creating Firewall") + err = computeOperationWaitGlobal(config, op, project, "Creating Firewall") if err != nil { return err } @@ -194,7 +194,7 @@ func resourceComputeFirewallUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating firewall: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Firewall") + err = computeOperationWaitGlobal(config, op, project, "Updating Firewall") if err != nil { return err } @@ -219,7 +219,7 @@ func resourceComputeFirewallDelete(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error deleting firewall: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Firewall") + err = computeOperationWaitGlobal(config, op, project, "Deleting Firewall") if err != nil { return err } diff --git 
a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go index af6b267d1..8f1634c44 100644 --- a/builtin/providers/google/resource_compute_forwarding_rule.go +++ b/builtin/providers/google/resource_compute_forwarding_rule.go @@ -107,7 +107,7 @@ func resourceComputeForwardingRuleCreate(d *schema.ResourceData, meta interface{ // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Fowarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Creating Fowarding Rule") if err != nil { return err } @@ -139,7 +139,7 @@ func resourceComputeForwardingRuleUpdate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Updating Forwarding Rule") if err != nil { return err } @@ -207,7 +207,7 @@ func resourceComputeForwardingRuleDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting ForwardingRule: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Forwarding Rule") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Forwarding Rule") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_global_address.go b/builtin/providers/google/resource_compute_global_address.go index 6c2da4fc7..e335e527a 100644 --- a/builtin/providers/google/resource_compute_global_address.go +++ b/builtin/providers/google/resource_compute_global_address.go @@ -60,7 +60,7 @@ func resourceComputeGlobalAddressCreate(d *schema.ResourceData, meta interface{} // It probably maybe worked, so store the ID now d.SetId(addr.Name) - err = computeOperationWaitGlobal(config, op, "Creating Global Address") + err = computeOperationWaitGlobal(config, op, project, "Creating Global Address") if err != nil { return err } @@ -112,7 +112,7 @@ func resourceComputeGlobalAddressDelete(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error deleting address: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Global Address") + err = computeOperationWaitGlobal(config, op, project, "Deleting Global Address") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_global_forwarding_rule.go b/builtin/providers/google/resource_compute_global_forwarding_rule.go index e098a993d..e70c88377 100644 --- a/builtin/providers/google/resource_compute_global_forwarding_rule.go +++ b/builtin/providers/google/resource_compute_global_forwarding_rule.go @@ -101,7 +101,7 @@ func resourceComputeGlobalForwardingRuleCreate(d *schema.ResourceData, meta inte // It probably maybe worked, so store the ID now d.SetId(frule.Name) - err = computeOperationWaitGlobal(config, op, "Creating Global Fowarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Creating Global Fowarding Rule") if err != nil { return err } @@ -128,7 +128,7 @@ func resourceComputeGlobalForwardingRuleUpdate(d *schema.ResourceData, meta inte return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Global Forwarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Updating Global Forwarding Rule") if err != nil { return err } @@ -186,7 +186,7 @@ func resourceComputeGlobalForwardingRuleDelete(d *schema.ResourceData, meta inte 
return fmt.Errorf("Error deleting GlobalForwardingRule: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting GlobalForwarding Rule") + err = computeOperationWaitGlobal(config, op, project, "Deleting GlobalForwarding Rule") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go index b9114273a..70c0146bb 100644 --- a/builtin/providers/google/resource_compute_http_health_check.go +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -131,7 +131,7 @@ func resourceComputeHttpHealthCheckCreate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Creating Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Creating Http Health Check") if err != nil { return err } @@ -187,7 +187,7 @@ func resourceComputeHttpHealthCheckUpdate(d *schema.ResourceData, meta interface // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Updating Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Updating Http Health Check") if err != nil { return err } @@ -244,7 +244,7 @@ func resourceComputeHttpHealthCheckDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error deleting HttpHealthCheck: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Http Health Check") + err = computeOperationWaitGlobal(config, op, project, "Deleting Http Health Check") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_https_health_check.go b/builtin/providers/google/resource_compute_https_health_check.go index a52fa186c..0746d542b 100644 --- a/builtin/providers/google/resource_compute_https_health_check.go +++ b/builtin/providers/google/resource_compute_https_health_check.go @@ -131,7 +131,7 @@ func resourceComputeHttpsHealthCheckCreate(d *schema.ResourceData, meta interfac // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Creating Https Health Check") + err = computeOperationWaitGlobal(config, op, project, "Creating Https Health Check") if err != nil { return err } @@ -187,7 +187,7 @@ func resourceComputeHttpsHealthCheckUpdate(d *schema.ResourceData, meta interfac // It probably maybe worked, so store the ID now d.SetId(hchk.Name) - err = computeOperationWaitGlobal(config, op, "Updating Https Health Check") + err = computeOperationWaitGlobal(config, op, project, "Updating Https Health Check") if err != nil { return err } @@ -244,7 +244,7 @@ func resourceComputeHttpsHealthCheckDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting HttpsHealthCheck: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Https Health Check") + err = computeOperationWaitGlobal(config, op, project, "Deleting Https Health Check") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index bc0c0d244..11aa864dd 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -577,7 +577,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err d.SetId(instance.Name) // Wait for the operation to complete - waitErr := computeOperationWaitZone(config, op, 
zone.Name, "instance to create") + waitErr := computeOperationWaitZone(config, op, project, zone.Name, "instance to create") if waitErr != nil { // The resource didn't actually create d.SetId("") @@ -786,7 +786,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating metadata: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "metadata to update") + opErr := computeOperationWaitZone(config, op, project, zone, "metadata to update") if opErr != nil { return opErr } @@ -806,7 +806,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating tags: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "tags to update") + opErr := computeOperationWaitZone(config, op, project, zone, "tags to update") if opErr != nil { return opErr } @@ -837,7 +837,7 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error updating scheduling policy: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, + opErr := computeOperationWaitZone(config, op, project, zone, "scheduling policy update") if opErr != nil { return opErr @@ -879,7 +879,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error deleting old access_config: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "old access_config to delete") + opErr := computeOperationWaitZone(config, op, project, zone, + "old access_config to delete") if opErr != nil { return opErr } @@ -898,7 +899,8 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err if err != nil { return fmt.Errorf("Error adding new access_config: %s", err) } - opErr := computeOperationWaitZone(config, op, zone, "new access_config to add") + opErr := computeOperationWaitZone(config, op, project, zone, + "new access_config to add") if opErr != nil { return opErr } @@ -929,7 +931,7 @@ func resourceComputeInstanceDelete(d *schema.ResourceData, meta interface{}) err } // Wait for the operation to complete - opErr := computeOperationWaitZone(config, op, zone, "instance to delete") + opErr := computeOperationWaitZone(config, op, project, zone, "instance to delete") if opErr != nil { return opErr } diff --git a/builtin/providers/google/resource_compute_instance_group.go b/builtin/providers/google/resource_compute_instance_group.go index 4bbbc4e45..a6ece3a41 100644 --- a/builtin/providers/google/resource_compute_instance_group.go +++ b/builtin/providers/google/resource_compute_instance_group.go @@ -136,7 +136,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} d.SetId(instanceGroup.Name) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroup") if err != nil { return err } @@ -159,7 +159,7 @@ func resourceComputeInstanceGroupCreate(d *schema.ResourceData, meta interface{} } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Adding instances to InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Adding instances to InstanceGroup") if err != nil { return err } @@ -264,7 +264,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} } // Wait for the 
operation to complete - err = computeOperationWaitZone(config, removeOp, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, removeOp, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -284,7 +284,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} } // Wait for the operation to complete - err = computeOperationWaitZone(config, addOp, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, addOp, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -307,7 +307,7 @@ func resourceComputeInstanceGroupUpdate(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error updating named ports for InstanceGroup: %s", err) } - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroup") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroup") if err != nil { return err } @@ -333,7 +333,7 @@ func resourceComputeInstanceGroupDelete(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error deleting InstanceGroup: %s", err) } - err = computeOperationWaitZone(config, op, zone, "Deleting InstanceGroup") + err = computeOperationWaitZone(config, op, project, zone, "Deleting InstanceGroup") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_instance_group_manager.go b/builtin/providers/google/resource_compute_instance_group_manager.go index 21deac9d4..b0caa0374 100644 --- a/builtin/providers/google/resource_compute_instance_group_manager.go +++ b/builtin/providers/google/resource_compute_instance_group_manager.go @@ -176,7 +176,7 @@ func resourceComputeInstanceGroupManagerCreate(d *schema.ResourceData, meta inte d.SetId(manager.Name) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Creating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Creating InstanceGroupManager") if err != nil { return err } @@ -247,7 +247,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -269,7 +269,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -296,7 +296,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZoneTime(config, op, d.Get("zone").(string), + err = computeOperationWaitZoneTime(config, op, project, d.Get("zone").(string), managedInstanceCount*4, "Restarting InstanceGroupManagers instances") if err != nil { return err @@ -323,7 +323,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete: - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = 
computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -344,7 +344,7 @@ func resourceComputeInstanceGroupManagerUpdate(d *schema.ResourceData, meta inte } // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Updating InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Updating InstanceGroupManager") if err != nil { return err } @@ -375,7 +375,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte currentSize := int64(d.Get("target_size").(int)) // Wait for the operation to complete - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") for err != nil && currentSize > 0 { if !strings.Contains(err.Error(), "timeout") { @@ -397,7 +397,7 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte currentSize = instanceGroup.Size - err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") + err = computeOperationWaitZone(config, op, project, d.Get("zone").(string), "Deleting InstanceGroupManager") } d.SetId("") diff --git a/builtin/providers/google/resource_compute_instance_template.go b/builtin/providers/google/resource_compute_instance_template.go index a4b2a3526..4add7124d 100644 --- a/builtin/providers/google/resource_compute_instance_template.go +++ b/builtin/providers/google/resource_compute_instance_template.go @@ -562,7 +562,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac // Store the ID now d.SetId(instanceTemplate.Name) - err = computeOperationWaitGlobal(config, op, "Creating Instance Template") + err = computeOperationWaitGlobal(config, op, project, "Creating Instance Template") if err != nil { return err } @@ -620,7 +620,7 @@ func resourceComputeInstanceTemplateDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting instance template: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Instance Template") + err = computeOperationWaitGlobal(config, op, project, "Deleting Instance Template") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_network.go b/builtin/providers/google/resource_compute_network.go index 3a08f7c40..3356edcc8 100644 --- a/builtin/providers/google/resource_compute_network.go +++ b/builtin/providers/google/resource_compute_network.go @@ -110,7 +110,7 @@ func resourceComputeNetworkCreate(d *schema.ResourceData, meta interface{}) erro // It probably maybe worked, so store the ID now d.SetId(network.Name) - err = computeOperationWaitGlobal(config, op, "Creating Network") + err = computeOperationWaitGlobal(config, op, project, "Creating Network") if err != nil { return err } @@ -161,7 +161,7 @@ func resourceComputeNetworkDelete(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error deleting network: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Network") + err = computeOperationWaitGlobal(config, op, project, "Deleting Network") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_project_metadata.go b/builtin/providers/google/resource_compute_project_metadata.go index 39f3ba2b0..ea8a51281 100644 --- 
a/builtin/providers/google/resource_compute_project_metadata.go +++ b/builtin/providers/google/resource_compute_project_metadata.go @@ -77,7 +77,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - return computeOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") } err = MetadataRetryWrapper(createMD) @@ -156,7 +156,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface // Optimistic locking requires the fingerprint received to match // the fingerprint we send the server, if there is a mismatch then we // are working on old data, and must retry - return computeOperationWaitGlobal(config, op, "SetCommonMetadata") + return computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") } err := MetadataRetryWrapper(updateMD) @@ -194,7 +194,7 @@ func resourceComputeProjectMetadataDelete(d *schema.ResourceData, meta interface log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink) - err = computeOperationWaitGlobal(config, op, "SetCommonMetadata") + err = computeOperationWaitGlobal(config, op, project.Name, "SetCommonMetadata") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_route.go b/builtin/providers/google/resource_compute_route.go index 82ea18064..5808216e1 100644 --- a/builtin/providers/google/resource_compute_route.go +++ b/builtin/providers/google/resource_compute_route.go @@ -167,7 +167,7 @@ func resourceComputeRouteCreate(d *schema.ResourceData, meta interface{}) error // It probably maybe worked, so store the ID now d.SetId(route.Name) - err = computeOperationWaitGlobal(config, op, "Creating Route") + err = computeOperationWaitGlobal(config, op, project, "Creating Route") if err != nil { return err } @@ -218,7 +218,7 @@ func resourceComputeRouteDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error deleting route: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Route") + err = computeOperationWaitGlobal(config, op, project, "Deleting Route") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_ssl_certificate.go b/builtin/providers/google/resource_compute_ssl_certificate.go index 8310b4403..25b695fbb 100644 --- a/builtin/providers/google/resource_compute_ssl_certificate.go +++ b/builtin/providers/google/resource_compute_ssl_certificate.go @@ -86,7 +86,7 @@ func resourceComputeSslCertificateCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error creating ssl certificate: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating SslCertificate") + err = computeOperationWaitGlobal(config, op, project, "Creating SslCertificate") if err != nil { return err } @@ -138,7 +138,7 @@ func resourceComputeSslCertificateDelete(d *schema.ResourceData, meta interface{ return fmt.Errorf("Error deleting ssl certificate: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting SslCertificate") + err = computeOperationWaitGlobal(config, op, project, "Deleting SslCertificate") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_subnetwork.go b/builtin/providers/google/resource_compute_subnetwork.go index 88ef4255a..add8916e8 100644 --- a/builtin/providers/google/resource_compute_subnetwork.go +++ b/builtin/providers/google/resource_compute_subnetwork.go @@ -115,7 +115,7 @@ func 
resourceComputeSubnetworkCreate(d *schema.ResourceData, meta interface{}) e subnetwork.Region = region d.SetId(createSubnetID(subnetwork)) - err = computeOperationWaitRegion(config, op, region, "Creating Subnetwork") + err = computeOperationWaitRegion(config, op, project, region, "Creating Subnetwork") if err != nil { return err } @@ -178,7 +178,7 @@ func resourceComputeSubnetworkDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting subnetwork: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Subnetwork") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Subnetwork") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_target_http_proxy.go b/builtin/providers/google/resource_compute_target_http_proxy.go index a85cddb55..72c68eb51 100644 --- a/builtin/providers/google/resource_compute_target_http_proxy.go +++ b/builtin/providers/google/resource_compute_target_http_proxy.go @@ -78,7 +78,7 @@ func resourceComputeTargetHttpProxyCreate(d *schema.ResourceData, meta interface return fmt.Errorf("Error creating TargetHttpProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Creating Target Http Proxy") if err != nil { return err } @@ -107,7 +107,7 @@ func resourceComputeTargetHttpProxyUpdate(d *schema.ResourceData, meta interface return fmt.Errorf("Error updating target: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Http Proxy") if err != nil { return err } @@ -164,7 +164,7 @@ func resourceComputeTargetHttpProxyDelete(d *schema.ResourceData, meta interface return fmt.Errorf("Error deleting TargetHttpProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Target Http Proxy") + err = computeOperationWaitGlobal(config, op, project, "Deleting Target Http Proxy") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_target_https_proxy.go b/builtin/providers/google/resource_compute_target_https_proxy.go index 041ae4b6b..5e8bf58c2 100644 --- a/builtin/providers/google/resource_compute_target_https_proxy.go +++ b/builtin/providers/google/resource_compute_target_https_proxy.go @@ -92,7 +92,7 @@ func resourceComputeTargetHttpsProxyCreate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error creating TargetHttpsProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Creating Target Https Proxy") + err = computeOperationWaitGlobal(config, op, project, "Creating Target Https Proxy") if err != nil { return err } @@ -121,7 +121,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error updating Target HTTPS proxy URL map: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy URL Map") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy URL Map") if err != nil { return err } @@ -182,7 +182,7 @@ func resourceComputeTargetHttpsProxyUpdate(d *schema.ResourceData, meta interfac return fmt.Errorf("Error updating Target Https Proxy SSL Certificates: %s", err) } - err = computeOperationWaitGlobal(config, op, "Updating Target Https Proxy SSL certificates") + err = computeOperationWaitGlobal(config, op, project, "Updating Target Https Proxy SSL certificates") if err != nil { return err } @@ -257,7 +257,7 @@ func 
resourceComputeTargetHttpsProxyDelete(d *schema.ResourceData, meta interfac return fmt.Errorf("Error deleting TargetHttpsProxy: %s", err) } - err = computeOperationWaitGlobal(config, op, "Deleting Target Https Proxy") + err = computeOperationWaitGlobal(config, op, project, "Deleting Target Https Proxy") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go index 810f292f3..b49ca4251 100644 --- a/builtin/providers/google/resource_compute_target_pool.go +++ b/builtin/providers/google/resource_compute_target_pool.go @@ -172,7 +172,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e // It probably maybe worked, so store the ID now d.SetId(tpool.Name) - err = computeOperationWaitRegion(config, op, region, "Creating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Creating Target Pool") if err != nil { return err } @@ -251,7 +251,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -267,7 +267,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating health_check: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -301,7 +301,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating instances: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -316,7 +316,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e if err != nil { return fmt.Errorf("Error updating instances: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -334,7 +334,7 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error updating backup_pool: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Updating Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Updating Target Pool") if err != nil { return err } @@ -398,7 +398,7 @@ func resourceComputeTargetPoolDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error deleting TargetPool: %s", err) } - err = computeOperationWaitRegion(config, op, region, "Deleting Target Pool") + err = computeOperationWaitRegion(config, op, project, region, "Deleting Target Pool") if err != nil { return err } diff --git a/builtin/providers/google/resource_compute_url_map.go b/builtin/providers/google/resource_compute_url_map.go index 303ff6688..9caebb1cb 100644 --- a/builtin/providers/google/resource_compute_url_map.go +++ b/builtin/providers/google/resource_compute_url_map.go @@ -288,7 +288,7 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to insert 
Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Insert Url Map") + err = computeOperationWaitGlobal(config, op, project, "Insert Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to insert Url Map %s: %s", name, err) @@ -651,7 +651,7 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to update Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Update Url Map") + err = computeOperationWaitGlobal(config, op, project, "Update Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to update Url Map %s: %s", name, err) @@ -676,7 +676,7 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error, failed to delete Url Map %s: %s", name, err) } - err = computeOperationWaitGlobal(config, op, "Delete Url Map") + err = computeOperationWaitGlobal(config, op, project, "Delete Url Map") if err != nil { return fmt.Errorf("Error, failed waitng to delete Url Map %s: %s", name, err) diff --git a/builtin/providers/google/resource_compute_vpn_gateway.go b/builtin/providers/google/resource_compute_vpn_gateway.go index 1a10ec52d..ed20a7c6c 100644 --- a/builtin/providers/google/resource_compute_vpn_gateway.go +++ b/builtin/providers/google/resource_compute_vpn_gateway.go @@ -89,7 +89,7 @@ func resourceComputeVpnGatewayCreate(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Inserting VPN Gateway %s into network %s: %s", name, network, err) } - err = computeOperationWaitRegion(config, op, region, "Inserting VPN Gateway") + err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Gateway %s into network %s: %s", name, network, err) } @@ -155,7 +155,7 @@ func resourceComputeVpnGatewayDelete(d *schema.ResourceData, meta interface{}) e return fmt.Errorf("Error Reading VPN Gateway %s: %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Deleting VPN Gateway") + err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Gateway") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Gateway %s: %s", name, err) } diff --git a/builtin/providers/google/resource_compute_vpn_tunnel.go b/builtin/providers/google/resource_compute_vpn_tunnel.go index 96ff15d4e..989764c25 100644 --- a/builtin/providers/google/resource_compute_vpn_tunnel.go +++ b/builtin/providers/google/resource_compute_vpn_tunnel.go @@ -144,7 +144,7 @@ func resourceComputeVpnTunnelCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Inserting VPN Tunnel %s : %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Inserting VPN Tunnel") + err = computeOperationWaitRegion(config, op, project, region, "Inserting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Insert VPN Tunnel %s: %s", name, err) } @@ -212,7 +212,7 @@ func resourceComputeVpnTunnelDelete(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error Reading VPN Tunnel %s: %s", name, err) } - err = computeOperationWaitRegion(config, op, region, "Deleting VPN Tunnel") + err = computeOperationWaitRegion(config, op, project, region, "Deleting VPN Tunnel") if err != nil { return fmt.Errorf("Error Waiting to Delete VPN Tunnel %s: %s", name, err) } From 71dc5ea1c3db2d70614ebe27bfe0ad30d9fd0fe5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Patrick=20Sodre=CC=81?= Date: Sat, 11 Jun 2016 21:57:39 
-0400 Subject: [PATCH 0017/1238] Add AccTest for tags with dots for Triton provider - Tests for issue hashicorp/terraform#2143. - Fixed package names since Triton deprecated the g3-series. - Must supply SDC_URL when invoking tests. Test will fail on metadata_3 but not on _2 as discussed with @jen20 on 11 June 2016. --- .../providers/triton/resource_machine_test.go | 78 +++++++++++++------ 1 file changed, 56 insertions(+), 22 deletions(-) diff --git a/builtin/providers/triton/resource_machine_test.go b/builtin/providers/triton/resource_machine_test.go index 2ed6dd04e..4d32e4fa2 100644 --- a/builtin/providers/triton/resource_machine_test.go +++ b/builtin/providers/triton/resource_machine_test.go @@ -233,6 +233,8 @@ func TestAccTritonMachine_metadata(t *testing.T) { machineName := fmt.Sprintf("acctest-%d", acctest.RandInt()) basic := fmt.Sprintf(testAccTritonMachine_metadata_1, machineName) add_metadata := fmt.Sprintf(testAccTritonMachine_metadata_1, machineName) + add_metadata_2 := fmt.Sprintf(testAccTritonMachine_metadata_2, machineName) + add_metadata_3 := fmt.Sprintf(testAccTritonMachine_metadata_3, machineName) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -253,18 +255,32 @@ func TestAccTritonMachine_metadata(t *testing.T) { "triton_machine.test", "user_data", "hello"), ), }, + resource.TestStep{ + Config: add_metadata_2, + Check: resource.ComposeTestCheckFunc( + testCheckTritonMachineExists("triton_machine.test"), + resource.TestCheckResourceAttr( + "triton_machine.test", + "tags.triton.cns.services", "test-cns-service"), + ), + }, + resource.TestStep{ + Config: add_metadata_3, + Check: resource.ComposeTestCheckFunc( + testCheckTritonMachineExists("triton_machine.test"), + resource.TestCheckResourceAttr( + "triton_machine.test", + "tags.triton.cns.services", "test-cns-service"), + ), + }, }, }) } var testAccTritonMachine_basic = ` -provider "triton" { - url = "https://us-west-1.api.joyentcloud.com" -} - resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" tags = { @@ -274,26 +290,18 @@ resource "triton_machine" "test" { ` var testAccTritonMachine_firewall_0 = ` -provider "triton" { - url = "https://us-west-1.api.joyentcloud.com" -} - resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" firewall_enabled = 0 } ` var testAccTritonMachine_firewall_1 = ` -provider "triton" { - url = "https://us-west-1.api.joyentcloud.com" -} - resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" firewall_enabled = 1 @@ -301,13 +309,9 @@ resource "triton_machine" "test" { ` var testAccTritonMachine_metadata_1 = ` -provider "triton" { - url = "https://us-west-1.api.joyentcloud.com" -} - resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" user_data = "hello" @@ -317,7 +321,37 @@ resource "triton_machine" "test" { } } ` +var testAccTritonMachine_metadata_2 = ` +variable "tags" { + default = { + test = "hello!" 
+ triton.cns.services = "test-cns-service" + } +} +resource "triton_machine" "test" { + name = "%s" + package = "g4-highcpu-128M" + image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" + user_data = "hello" + + tags = "${var.tags}" +} +` +var testAccTritonMachine_metadata_3 = ` +resource "triton_machine" "test" { + name = "%s" + package = "g4-highcpu-128M" + image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" + + user_data = "hello" + + tags = { + test = "hello!" + triton.cns.services = "test-cns-service" + } +} +` var testAccTritonMachine_withnic = ` resource "triton_fabric" "test" { name = "%s-network" @@ -334,7 +368,7 @@ resource "triton_fabric" "test" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "842e6fa6-6e9b-11e5-8402-1b490459e334" tags = { @@ -361,7 +395,7 @@ resource "triton_fabric" "test" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-highcpu-128M" image = "842e6fa6-6e9b-11e5-8402-1b490459e334" tags = { From 90889632e0f57c4e2d6daeb08f5c32e115c6fc48 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Wed, 15 Jun 2016 19:55:45 +0200 Subject: [PATCH 0018/1238] Add aws_ecs_container_definition data source this datasource allows terraform to work with externally modified state, e.g. when you're using an ECS service which is continously updated by your CI via the AWS CLI. right now you'd have to wrap terraform into a shell script which looks up the current image digest, so running terraform won't change the updated service. using the aws_ecs_container_definition data source you can now leverage terraform, removing the wrapper entirely. --- ...ata_source_aws_ecs_container_definition.go | 98 +++++++++++++++++++ ...ource_aws_ecs_container_definition_test.go | 62 ++++++++++++ builtin/providers/aws/provider.go | 9 +- .../d/ecs_container_definition.html.markdown | 41 ++++++++ website/source/layouts/aws.erb | 3 + 5 files changed, 209 insertions(+), 4 deletions(-) create mode 100644 builtin/providers/aws/data_source_aws_ecs_container_definition.go create mode 100644 builtin/providers/aws/data_source_aws_ecs_container_definition_test.go create mode 100644 website/source/docs/providers/aws/d/ecs_container_definition.html.markdown diff --git a/builtin/providers/aws/data_source_aws_ecs_container_definition.go b/builtin/providers/aws/data_source_aws_ecs_container_definition.go new file mode 100644 index 000000000..ecc1b20b7 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_ecs_container_definition.go @@ -0,0 +1,98 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ecs" + "github.com/hashicorp/terraform/helper/schema" +) + +func dataSourceAwsEcsContainerDefinition() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsEcsContainerDefinitionRead, + + Schema: map[string]*schema.Schema{ + "task_definition": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "container_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + // Computed values. 
+ "image": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "image_digest": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "cpu": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "memory": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "disable_networking": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + }, + "docker_labels": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeString, + }, + "environment": &schema.Schema{ + Type: schema.TypeMap, + Computed: true, + Elem: schema.TypeString, + }, + }, + } +} + +func dataSourceAwsEcsContainerDefinitionRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ecsconn + + desc, err := conn.DescribeTaskDefinition(&ecs.DescribeTaskDefinitionInput{ + TaskDefinition: aws.String(d.Get("task_definition").(string)), + }) + if err != nil { + return err + } + + taskDefinition := *desc.TaskDefinition + for _, def := range taskDefinition.ContainerDefinitions { + if aws.StringValue(def.Name) != d.Get("container_name").(string) { + continue + } + + d.SetId(fmt.Sprintf("%s/%s", aws.StringValue(taskDefinition.TaskDefinitionArn), d.Get("container_name").(string))) + d.Set("image", aws.StringValue(def.Image)) + d.Set("image_digest", strings.Split(aws.StringValue(def.Image), ":")[1]) + d.Set("cpu", aws.Int64Value(def.Cpu)) + d.Set("memory", aws.Int64Value(def.Memory)) + d.Set("disable_networking", aws.BoolValue(def.DisableNetworking)) + d.Set("docker_labels", aws.StringValueMap(def.DockerLabels)) + + var environment = map[string]string{} + for _, keyValuePair := range def.Environment { + environment[aws.StringValue(keyValuePair.Name)] = aws.StringValue(keyValuePair.Value) + } + d.Set("environment", environment) + } + + if d.Id() == "" { + return fmt.Errorf("container with name %q not found in task definition %q", d.Get("container_name").(string), d.Get("task_definition").(string)) + } + + return nil +} diff --git a/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go new file mode 100644 index 000000000..c0037e682 --- /dev/null +++ b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go @@ -0,0 +1,62 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSAmiDataSource_ecsContainerDefinition(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckAwsEcsContainerDefinitionDataSourceConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "image", "mongo:latest"), + resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "memory", "128"), + resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "cpu", "128"), + resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "environment.SECRET", "KEY"), + ), + }, + }, + }) +} + +const testAccCheckAwsEcsContainerDefinitionDataSourceConfig = ` +resource "aws_ecs_cluster" "default" { + name = "terraformecstest1" +} + +resource "aws_ecs_task_definition" "mongo" { + family = "mongodb" + container_definitions = <> aws_ami + > + aws_ecs_container_definition + > aws_availability_zones From b7c71382f67ed8c0aef9d9e724a847fbb5b54022 Mon Sep 17 00:00:00 2001 From: Sander van 
Harmelen Date: Fri, 24 Jun 2016 13:27:05 +0200 Subject: [PATCH 0019/1238] Make ACL's swappable, unless you want to stop using an ACL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In CloudStack you can dynamically start using an ACL and once you use an ACL you can dynamically swap ACL’s. But once your using an ACL, you can no longer stop using an ACL without rebuilding the network. This change makes the `ForceNew` value dynamic so that it only returns `true` if you are reverting from using an ACL to not using an ACL anymore, making this functionally inline with the behaviour CloudStack offers. --- .../cloudstack/resource_cloudstack_network.go | 37 +++++++++++++------ .../cloudstack/r/network.html.markdown | 7 +++- 2 files changed, 31 insertions(+), 13 deletions(-) diff --git a/builtin/providers/cloudstack/resource_cloudstack_network.go b/builtin/providers/cloudstack/resource_cloudstack_network.go index 458a768fe..b19ba5bca 100644 --- a/builtin/providers/cloudstack/resource_cloudstack_network.go +++ b/builtin/providers/cloudstack/resource_cloudstack_network.go @@ -11,7 +11,26 @@ import ( "github.com/xanzy/go-cloudstack/cloudstack" ) +const none = "none" + func resourceCloudStackNetwork() *schema.Resource { + aclidSchema := &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: none, + ConflictsWith: []string{"aclid"}, + } + + aclidSchema.StateFunc = func(v interface{}) string { + value := v.(string) + + if value == none { + aclidSchema.ForceNew = true + } + + return value + } + return &schema.Resource{ Create: resourceCloudStackNetworkCreate, Read: resourceCloudStackNetworkRead, @@ -82,12 +101,7 @@ func resourceCloudStackNetwork() *schema.Resource { Deprecated: "Please use the `vpc_id` field instead", }, - "acl_id": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - Computed: true, - ConflictsWith: []string{"aclid"}, - }, + "acl_id": aclidSchema, "aclid": &schema.Schema{ Type: schema.TypeString, @@ -177,7 +191,7 @@ func resourceCloudStackNetworkCreate(d *schema.ResourceData, meta interface{}) e if !ok { aclid, ok = d.GetOk("acl") } - if ok { + if ok && aclid != none { // Set the acl ID p.SetAclid(aclid.(string)) } @@ -232,11 +246,12 @@ func resourceCloudStackNetworkRead(d *schema.ResourceData, meta interface{}) err _, vpc := d.GetOk("vpc") if vpcID || vpc { d.Set("vpc_id", n.Vpcid) - } - _, aclID := d.GetOk("acl_id") - _, acl := d.GetOk("aclid") - if aclID || acl { + // Since we're in a VPC, also update the ACL ID. If we don't + // have an ACL ID make sure we set the default value instead. + if n.Aclid == "" { + n.Aclid = none + } d.Set("acl_id", n.Aclid) } diff --git a/website/source/docs/providers/cloudstack/r/network.html.markdown b/website/source/docs/providers/cloudstack/r/network.html.markdown index 5d40a43cf..580deefbe 100644 --- a/website/source/docs/providers/cloudstack/r/network.html.markdown +++ b/website/source/docs/providers/cloudstack/r/network.html.markdown @@ -56,9 +56,12 @@ The following arguments are supported: * `vpc` - (Optional, Deprecated) The name or ID of the VPC to create this network for. Changing this forces a new resource to be created. -* `acl_id` - (Optional) The network ACL ID that should be attached to the network. +* `acl_id` - (Optional) The ACL ID that should be attached to the network or + `none` if you do not want to attach an ACL. 
You can dynamically attach and + swap ACL's, but if you want to detach an attached ACL and revert to using + `none`, this will force a new resource to be created. Defaults to `none`. -* `aclid` - (Optional, Deprecated) The ID of a network ACL that should be attached +* `aclid` - (Optional, Deprecated) The ID of a ACL that should be attached to the network. * `project` - (Optional) The name or ID of the project to deploy this From 56284ea7816626e53c47176d51d6622b4faeb964 Mon Sep 17 00:00:00 2001 From: Pete Shima Date: Sat, 25 Jun 2016 00:41:42 -0700 Subject: [PATCH 0020/1238] Add a link to the cloudwatch dimensions aws page in the docs (#7325) --- .../docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown index 9b65c76a0..75ae6ceb1 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown @@ -73,7 +73,7 @@ The following arguments are supported: * `actions_enabled` - (Optional) Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to `true`. * `alarm_actions` - (Optional) The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN). * `alarm_description` - (Optional) The description for the alarm. -* `dimensions` - (Optional) The dimensions for the alarm's associated metric. +* `dimensions` - (Optional) The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html). * `insufficient_data_actions` - (Optional) The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN). * `ok_actions` - (Optional) The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN). * `unit` - (Optional) The unit for the alarm's associated metric. 
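For illustration, a minimal `aws_cloudwatch_metric_alarm` that scopes its metric with `dimensions` might look like the sketch below; the alarm name, metric, namespace, and instance ID are example values chosen for this sketch and are not taken from the patches in this series.

```
resource "aws_cloudwatch_metric_alarm" "cpu" {
    alarm_name          = "example-high-cpu"
    comparison_operator = "GreaterThanOrEqualToThreshold"
    evaluation_periods  = "2"
    metric_name         = "CPUUtilization"
    namespace           = "AWS/EC2"
    period              = "120"
    statistic           = "Average"
    threshold           = "80"
    alarm_description   = "Average CPU of one instance at or above 80% for two periods"

    # Dimensions narrow the metric to a single resource; the dimension names
    # that are valid for each namespace are listed in the AWS documentation
    # referenced in the argument description above.
    dimensions {
        InstanceId = "i-1234567890abcdef0"
    }
}
```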
From 2faaf0a18c9344df36cd5fae86621a7df3ffd21b Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Sat, 25 Jun 2016 19:35:36 +0200 Subject: [PATCH 0021/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 93d278fb9..ff55fd198 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -90,6 +90,7 @@ IMPROVEMENTS: * provider/clc: Fix optional server password [GH-6414] * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` [GH-6898] * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier [GH-6741] + * provider/cloudstack: Improve ACL swapping [GH-7315] * provider/datadog: Add support for 'require full window' and 'locked' [GH-6738] * provider/fastly: Add support for Cache Settings [GH-6781] * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources [GH-6622] From c0ecbdb27b07f43dd7f5c148e1e659c90d0c6485 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 26 Jun 2016 13:21:09 -0600 Subject: [PATCH 0022/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ff55fd198..b8a92443a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -104,6 +104,7 @@ IMPROVEMENTS: * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource [GH-6919] * provider/openstack: Enforce `ForceNew` on Instance Block Device [GH-6921] * provider/openstack: Can now stop instances before destroying them [GH-7184] + * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion [GH-6997] * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` [GH-6785] * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip [GH-6377] * provider/vsphere: Virtual machine update disk [GH-6619] From 0a2245cbaee80d96968ce55f4ecc5ccbc2db0f8f Mon Sep 17 00:00:00 2001 From: stack72 Date: Sun, 26 Jun 2016 21:20:52 +0100 Subject: [PATCH 0023/1238] provider/aws: Support Import for `aws_db_parameter_group` ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSDBParameterGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDBParameterGroup_ -timeout 120m === RUN TestAccAWSDBParameterGroup_importBasic --- PASS: TestAccAWSDBParameterGroup_importBasic (31.03s) === RUN TestAccAWSDBParameterGroup_basic --- PASS: TestAccAWSDBParameterGroup_basic (51.22s) === RUN TestAccAWSDBParameterGroup_Only --- PASS: TestAccAWSDBParameterGroup_Only (25.45s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 107.725s ``` --- ...mport_aws_db_parameter_group_group_test.go | 31 +++++++++++++++++++ .../aws/resource_aws_db_parameter_group.go | 4 +++ 2 files changed, 35 insertions(+) create mode 100644 builtin/providers/aws/import_aws_db_parameter_group_group_test.go diff --git a/builtin/providers/aws/import_aws_db_parameter_group_group_test.go b/builtin/providers/aws/import_aws_db_parameter_group_group_test.go new file mode 100644 index 000000000..d9806e5cf --- /dev/null +++ b/builtin/providers/aws/import_aws_db_parameter_group_group_test.go @@ -0,0 +1,31 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSDBParameterGroup_importBasic(t *testing.T) { + resourceName := "aws_db_parameter_group.bar" + groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBParameterGroupConfig(groupName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go index 2dcbc7fca..5f450a292 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group.go @@ -23,6 +23,10 @@ func resourceAwsDbParameterGroup() *schema.Resource { Read: resourceAwsDbParameterGroupRead, Update: resourceAwsDbParameterGroupUpdate, Delete: resourceAwsDbParameterGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "arn": &schema.Schema{ Type: schema.TypeString, From 1bd8b449e00bba44fd74bd2ea35677b4feb0dae3 Mon Sep 17 00:00:00 2001 From: yissachar Date: Sun, 26 Jun 2016 17:07:14 -0400 Subject: [PATCH 0024/1238] Add SES resource (#5387) * Add SES resource * Detect ReceiptRule deletion outside of Terraform * Handle order of rule actions * Add position field to docs * Fix hashes, add log messages, and other small cleanup * Fix rebase issue * Fix formatting --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 4 + ...esource_aws_ses_active_receipt_rule_set.go | 80 + ...ce_aws_ses_active_receipt_rule_set_test.go | 87 + .../aws/resource_aws_ses_receipt_filter.go | 105 + .../resource_aws_ses_receipt_filter_test.go | 99 + .../aws/resource_aws_ses_receipt_rule.go | 764 ++++ .../aws/resource_aws_ses_receipt_rule_set.go | 102 + .../resource_aws_ses_receipt_rule_set_test.go | 91 + .../aws/resource_aws_ses_receipt_rule_test.go | 292 ++ .../aws/aws-sdk-go/service/ses/api.go | 3984 +++++++++++++++++ .../aws/aws-sdk-go/service/ses/service.go | 93 + .../aws/aws-sdk-go/service/ses/waiters.go | 30 + .../ses_active_receipt_rule_set.html.markdown | 25 + 
.../aws/r/ses_receipt_filter.html.markdown | 29 + .../aws/r/ses_receipt_rule.html.markdown | 100 + .../aws/r/ses_receipt_rule_set.html.markdown | 25 + website/source/layouts/aws.erb | 24 + 18 files changed, 5939 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_ses_active_receipt_rule_set.go create mode 100644 builtin/providers/aws/resource_aws_ses_active_receipt_rule_set_test.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_filter.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_filter_test.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_rule.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_rule_set.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_rule_set_test.go create mode 100644 builtin/providers/aws/resource_aws_ses_receipt_rule_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go create mode 100644 website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown create mode 100644 website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown create mode 100644 website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown create mode 100644 website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index b4430fb4e..25839af0b 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -50,6 +50,7 @@ import ( "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/ses" "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sts" @@ -94,6 +95,7 @@ type AWSClient struct { apigateway *apigateway.APIGateway autoscalingconn *autoscaling.AutoScaling s3conn *s3.S3 + sesConn *ses.SES sqsconn *sqs.SQS snsconn *sns.SNS stsconn *sts.STS @@ -214,6 +216,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing S3 connection") client.s3conn = s3.New(sess) + log.Println("[INFO] Initializing SES connection") + client.sesConn = ses.New(sess) + log.Println("[INFO] Initializing SQS connection") client.sqsconn = sqs.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index bc55185f6..48d7c4077 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -251,6 +251,10 @@ func Provider() terraform.ResourceProvider { "aws_route": resourceAwsRoute(), "aws_route_table": resourceAwsRouteTable(), "aws_route_table_association": resourceAwsRouteTableAssociation(), + "aws_ses_active_receipt_rule_set": resourceAwsSesActiveReceiptRuleSet(), + "aws_ses_receipt_filter": resourceAwsSesReceiptFilter(), + "aws_ses_receipt_rule": resourceAwsSesReceiptRule(), + "aws_ses_receipt_rule_set": resourceAwsSesReceiptRuleSet(), "aws_s3_bucket": resourceAwsS3Bucket(), "aws_s3_bucket_object": resourceAwsS3BucketObject(), "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), diff --git a/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set.go b/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set.go new file mode 100644 index 000000000..854d645a6 --- /dev/null +++ 
b/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set.go @@ -0,0 +1,80 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesActiveReceiptRuleSet() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesActiveReceiptRuleSetUpdate, + Update: resourceAwsSesActiveReceiptRuleSetUpdate, + Read: resourceAwsSesActiveReceiptRuleSetRead, + Delete: resourceAwsSesActiveReceiptRuleSetDelete, + + Schema: map[string]*schema.Schema{ + "rule_set_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceAwsSesActiveReceiptRuleSetUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + ruleSetName := d.Get("rule_set_name").(string) + + createOpts := &ses.SetActiveReceiptRuleSetInput{ + RuleSetName: aws.String(ruleSetName), + } + + _, err := conn.SetActiveReceiptRuleSet(createOpts) + if err != nil { + return fmt.Errorf("Error setting active SES rule set: %s", err) + } + + d.SetId(ruleSetName) + + return resourceAwsSesActiveReceiptRuleSetRead(d, meta) +} + +func resourceAwsSesActiveReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + describeOpts := &ses.DescribeActiveReceiptRuleSetInput{} + + response, err := conn.DescribeActiveReceiptRuleSet(describeOpts) + if err != nil { + return err + } + + if response.Metadata != nil { + d.Set("rule_set_name", response.Metadata.Name) + } else { + log.Print("[WARN] No active Receipt Rule Set found") + d.SetId("") + } + + return nil +} + +func resourceAwsSesActiveReceiptRuleSetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + deleteOpts := &ses.SetActiveReceiptRuleSetInput{ + RuleSetName: nil, + } + + _, err := conn.SetActiveReceiptRuleSet(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting active SES rule set: %s", err) + } + + return nil +} diff --git a/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set_test.go b/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set_test.go new file mode 100644 index 000000000..0f9a37cdf --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_active_receipt_rule_set_test.go @@ -0,0 +1,87 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSSESActiveReceiptRuleSet_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESActiveReceiptRuleSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESActiveReceiptRuleSetConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESActiveReceiptRuleSetExists("aws_ses_active_receipt_rule_set.test"), + ), + }, + }, + }) +} + +func testAccCheckSESActiveReceiptRuleSetDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sesConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ses_active_receipt_rule_set" { + continue + } + + response, err := conn.DescribeActiveReceiptRuleSet(&ses.DescribeActiveReceiptRuleSetInput{}) + if err != nil { + return err + } + + if response.Metadata != nil && *response.Metadata.Name == "test-receipt-rule" { + return fmt.Errorf("Active receipt 
rule set still exists") + } + + } + + return nil + +} + +func testAccCheckAwsSESActiveReceiptRuleSetExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Active Receipt Rule Set not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Active Receipt Rule Set name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + response, err := conn.DescribeActiveReceiptRuleSet(&ses.DescribeActiveReceiptRuleSetInput{}) + if err != nil { + return err + } + + if *response.Metadata.Name != "test-receipt-rule" { + return fmt.Errorf("The active receipt rule set (%s) was not set to test-receipt-rule", *response.Metadata.Name) + } + + return nil + } +} + +const testAccAWSSESActiveReceiptRuleSetConfig = ` +resource "aws_ses_receipt_rule_set" "test" { + rule_set_name = "test-receipt-rule" +} + +resource "aws_ses_active_receipt_rule_set" "test" { + rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}" +} +` diff --git a/builtin/providers/aws/resource_aws_ses_receipt_filter.go b/builtin/providers/aws/resource_aws_ses_receipt_filter.go new file mode 100644 index 000000000..4ea7ccad2 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_receipt_filter.go @@ -0,0 +1,105 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptFilter() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesReceiptFilterCreate, + Read: resourceAwsSesReceiptFilterRead, + Delete: resourceAwsSesReceiptFilterDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSesReceiptFilterCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + name := d.Get("name").(string) + + createOpts := &ses.CreateReceiptFilterInput{ + Filter: &ses.ReceiptFilter{ + Name: aws.String(name), + IpFilter: &ses.ReceiptIpFilter{ + Cidr: aws.String(d.Get("cidr").(string)), + Policy: aws.String(d.Get("policy").(string)), + }, + }, + } + + _, err := conn.CreateReceiptFilter(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES receipt filter: %s", err) + } + + d.SetId(name) + + return resourceAwsSesReceiptFilterRead(d, meta) +} + +func resourceAwsSesReceiptFilterRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + listOpts := &ses.ListReceiptFiltersInput{} + + response, err := conn.ListReceiptFilters(listOpts) + if err != nil { + return err + } + + found := false + for _, element := range response.Filters { + if *element.Name == d.Id() { + d.Set("cidr", element.IpFilter.Cidr) + d.Set("policy", element.IpFilter.Policy) + found = true + } + } + + if !found { + log.Printf("[WARN] SES Receipt Filter (%s) not found", d.Id()) + d.SetId("") + } + + return nil +} + +func resourceAwsSesReceiptFilterDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + deleteOpts := &ses.DeleteReceiptFilterInput{ + FilterName: aws.String(d.Id()), + } + + _, err := conn.DeleteReceiptFilter(deleteOpts) + if err != nil { + return 
fmt.Errorf("Error deleting SES receipt filter: %s", err) + } + + return nil +} diff --git a/builtin/providers/aws/resource_aws_ses_receipt_filter_test.go b/builtin/providers/aws/resource_aws_ses_receipt_filter_test.go new file mode 100644 index 000000000..397d3f9a1 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_receipt_filter_test.go @@ -0,0 +1,99 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSSESReceiptFilter_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptFilterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptFilterConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESReceiptFilterExists("aws_ses_receipt_filter.test"), + ), + }, + }, + }) +} + +func testAccCheckSESReceiptFilterDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sesConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ses_receipt_filter" { + continue + } + + response, err := conn.ListReceiptFilters(&ses.ListReceiptFiltersInput{}) + if err != nil { + return err + } + + found := false + for _, element := range response.Filters { + if *element.Name == "block-some-ip" { + found = true + } + } + + if found { + return fmt.Errorf("The receipt filter still exists") + } + + } + + return nil + +} + +func testAccCheckAwsSESReceiptFilterExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES receipt filter not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES receipt filter ID not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + response, err := conn.ListReceiptFilters(&ses.ListReceiptFiltersInput{}) + if err != nil { + return err + } + + found := false + for _, element := range response.Filters { + if *element.Name == "block-some-ip" { + found = true + } + } + + if !found { + return fmt.Errorf("The receipt filter was not created") + } + + return nil + } +} + +const testAccAWSSESReceiptFilterConfig = ` +resource "aws_ses_receipt_filter" "test" { + name = "block-some-ip" + cidr = "10.10.10.10" + policy = "Block" +} +` diff --git a/builtin/providers/aws/resource_aws_ses_receipt_rule.go b/builtin/providers/aws/resource_aws_ses_receipt_rule.go new file mode 100644 index 000000000..267458248 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_receipt_rule.go @@ -0,0 +1,764 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "sort" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptRule() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSesReceiptRuleCreate, + Update: resourceAwsSesReceiptRuleUpdate, + Read: resourceAwsSesReceiptRuleRead, + Delete: resourceAwsSesReceiptRuleDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "rule_set_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "after": &schema.Schema{ + Type: schema.TypeString, 
+ Optional: true, + }, + + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "recipients": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "scan_enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "tls_policy": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "add_header_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "header_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "header_value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["header_name"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["header_value"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "bounce_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "message": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "sender": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "smtp_reply_code": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "status_code": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["message"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["sender"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["smtp_reply_code"].(string))) + + if _, ok := m["status_code"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["status_code"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "lambda_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "function_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "invocation_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["function_arn"].(string))) + + if _, ok := m["invocation_type"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["invocation_type"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "s3_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "bucket_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "kms_key_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "object_key_prefix": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["bucket_name"].(string))) + + if _, ok := m["kms_key_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["kms_key_arn"].(string))) + } + + if _, ok := m["object_key_prefix"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["object_key_prefix"].(string))) + } + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "sns_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "stop_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scope": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["scope"].(string))) + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + + "workmail_action": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "organization_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "topic_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "position": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["organization_arn"].(string))) + + if _, ok := m["topic_arn"]; ok { + buf.WriteString(fmt.Sprintf("%s-", m["topic_arn"].(string))) + } + + buf.WriteString(fmt.Sprintf("%d-", m["position"].(int))) + + return hashcode.String(buf.String()) + }, + }, + }, + } +} + +func resourceAwsSesReceiptRuleCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + createOpts := &ses.CreateReceiptRuleInput{ + Rule: buildReceiptRule(d, meta), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + if v, ok := d.GetOk("after"); ok { + createOpts.After = 
aws.String(v.(string)) + } + + _, err := conn.CreateReceiptRule(createOpts) + if err != nil { + return fmt.Errorf("Error creating SES rule: %s", err) + } + + d.SetId(d.Get("name").(string)) + + return resourceAwsSesReceiptRuleUpdate(d, meta) +} + +func resourceAwsSesReceiptRuleUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + updateOpts := &ses.UpdateReceiptRuleInput{ + Rule: buildReceiptRule(d, meta), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.UpdateReceiptRule(updateOpts) + if err != nil { + return fmt.Errorf("Error updating SES rule: %s", err) + } + + if d.HasChange("after") { + changePosOpts := &ses.SetReceiptRulePositionInput{ + After: aws.String(d.Get("after").(string)), + RuleName: aws.String(d.Get("name").(string)), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.SetReceiptRulePosition(changePosOpts) + if err != nil { + return fmt.Errorf("Error updating SES rule: %s", err) + } + } + + return resourceAwsSesReceiptRuleRead(d, meta) +} + +func resourceAwsSesReceiptRuleRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + describeOpts := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String(d.Id()), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + response, err := conn.DescribeReceiptRule(describeOpts) + if err != nil { + _, ok := err.(awserr.Error) + if ok && err.(awserr.Error).Code() == "RuleDoesNotExist" { + log.Printf("[WARN] SES Receipt Rule (%s) not found", d.Id()) + d.SetId("") + return nil + } else { + return err + } + } + + d.Set("enabled", *response.Rule.Enabled) + d.Set("recipients", flattenStringList(response.Rule.Recipients)) + d.Set("scan_enabled", *response.Rule.ScanEnabled) + d.Set("tls_policy", *response.Rule.TlsPolicy) + + addHeaderActionList := []map[string]interface{}{} + bounceActionList := []map[string]interface{}{} + lambdaActionList := []map[string]interface{}{} + s3ActionList := []map[string]interface{}{} + snsActionList := []map[string]interface{}{} + stopActionList := []map[string]interface{}{} + workmailActionList := []map[string]interface{}{} + + for i, element := range response.Rule.Actions { + if element.AddHeaderAction != nil { + addHeaderAction := map[string]interface{}{ + "header_name": *element.AddHeaderAction.HeaderName, + "header_value": *element.AddHeaderAction.HeaderValue, + "position": i, + } + addHeaderActionList = append(addHeaderActionList, addHeaderAction) + } + + if element.BounceAction != nil { + bounceAction := map[string]interface{}{ + "message": *element.BounceAction.Message, + "sender": *element.BounceAction.Sender, + "smtp_reply_code": *element.BounceAction.SmtpReplyCode, + "position": i, + } + + if element.BounceAction.StatusCode != nil { + bounceAction["status_code"] = *element.BounceAction.StatusCode + } + + if element.BounceAction.TopicArn != nil { + bounceAction["topic_arn"] = *element.BounceAction.TopicArn + } + + bounceActionList = append(bounceActionList, bounceAction) + } + + if element.LambdaAction != nil { + lambdaAction := map[string]interface{}{ + "function_arn": *element.LambdaAction.FunctionArn, + "position": i, + } + + if element.LambdaAction.InvocationType != nil { + lambdaAction["invocation_type"] = *element.LambdaAction.InvocationType + } + + if element.LambdaAction.TopicArn != nil { + lambdaAction["topic_arn"] = *element.LambdaAction.TopicArn + } + + lambdaActionList = append(lambdaActionList, lambdaAction) + } + + if element.S3Action != 
nil { + s3Action := map[string]interface{}{ + "bucket_name": *element.S3Action.BucketName, + "position": i, + } + + if element.S3Action.KmsKeyArn != nil { + s3Action["kms_key_arn"] = *element.S3Action.KmsKeyArn + } + + if element.S3Action.ObjectKeyPrefix != nil { + s3Action["object_key_prefix"] = *element.S3Action.ObjectKeyPrefix + } + + if element.S3Action.TopicArn != nil { + s3Action["topic_arn"] = *element.S3Action.TopicArn + } + + s3ActionList = append(s3ActionList, s3Action) + } + + if element.SNSAction != nil { + snsAction := map[string]interface{}{ + "topic_arn": *element.SNSAction.TopicArn, + "position": i, + } + + snsActionList = append(snsActionList, snsAction) + } + + if element.StopAction != nil { + stopAction := map[string]interface{}{ + "scope": *element.StopAction.Scope, + "position": i, + } + + if element.StopAction.TopicArn != nil { + stopAction["topic_arn"] = *element.StopAction.TopicArn + } + + stopActionList = append(stopActionList, stopAction) + } + + if element.WorkmailAction != nil { + workmailAction := map[string]interface{}{ + "organization_arn": *element.WorkmailAction.OrganizationArn, + "position": i, + } + + if element.WorkmailAction.TopicArn != nil { + workmailAction["topic_arn"] = *element.WorkmailAction.TopicArn + } + + workmailActionList = append(workmailActionList, workmailAction) + } + + } + + err = d.Set("add_header_action", addHeaderActionList) + if err != nil { + return err + } + + err = d.Set("bounce_action", bounceActionList) + if err != nil { + return err + } + + err = d.Set("lambda_action", lambdaActionList) + if err != nil { + return err + } + + err = d.Set("s3_action", s3ActionList) + if err != nil { + return err + } + + err = d.Set("sns_action", snsActionList) + if err != nil { + return err + } + + err = d.Set("stop_action", stopActionList) + if err != nil { + return err + } + + err = d.Set("workmail_action", workmailActionList) + if err != nil { + return err + } + + return nil +} + +func resourceAwsSesReceiptRuleDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).sesConn + + deleteOpts := &ses.DeleteReceiptRuleInput{ + RuleName: aws.String(d.Id()), + RuleSetName: aws.String(d.Get("rule_set_name").(string)), + } + + _, err := conn.DeleteReceiptRule(deleteOpts) + if err != nil { + return fmt.Errorf("Error deleting SES receipt rule: %s", err) + } + + return nil +} + +func buildReceiptRule(d *schema.ResourceData, meta interface{}) *ses.ReceiptRule { + receiptRule := &ses.ReceiptRule{ + Name: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("enabled"); ok { + receiptRule.Enabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("recipients"); ok { + receiptRule.Recipients = expandStringList(v.(*schema.Set).List()) + } + + if v, ok := d.GetOk("scan_enabled"); ok { + receiptRule.ScanEnabled = aws.Bool(v.(bool)) + } + + if v, ok := d.GetOk("tls_policy"); ok { + receiptRule.TlsPolicy = aws.String(v.(string)) + } + + actions := make(map[int]*ses.ReceiptAction) + + if v, ok := d.GetOk("add_header_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + AddHeaderAction: &ses.AddHeaderAction{ + HeaderName: aws.String(elem["header_name"].(string)), + HeaderValue: aws.String(elem["header_value"].(string)), + }, + } + } + } + + if v, ok := d.GetOk("bounce_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + bounceAction := &ses.BounceAction{ + 
Message: aws.String(elem["message"].(string)), + Sender: aws.String(elem["sender"].(string)), + SmtpReplyCode: aws.String(elem["smtp_reply_code"].(string)), + } + + if elem["status_code"] != "" { + bounceAction.StatusCode = aws.String(elem["status_code"].(string)) + } + + if elem["topic_arn"] != "" { + bounceAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + BounceAction: bounceAction, + } + } + } + + if v, ok := d.GetOk("lambda_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + lambdaAction := &ses.LambdaAction{ + FunctionArn: aws.String(elem["function_arn"].(string)), + } + + if elem["invocation_type"] != "" { + lambdaAction.InvocationType = aws.String(elem["invocation_type"].(string)) + } + + if elem["topic_arn"] != "" { + lambdaAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + LambdaAction: lambdaAction, + } + } + } + + if v, ok := d.GetOk("s3_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + s3Action := &ses.S3Action{ + BucketName: aws.String(elem["bucket_name"].(string)), + KmsKeyArn: aws.String(elem["kms_key_arn"].(string)), + ObjectKeyPrefix: aws.String(elem["object_key_prefix"].(string)), + } + + if elem["topic_arn"] != "" { + s3Action.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + S3Action: s3Action, + } + } + } + + if v, ok := d.GetOk("sns_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + snsAction := &ses.SNSAction{ + TopicArn: aws.String(elem["topic_arn"].(string)), + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + SNSAction: snsAction, + } + } + } + + if v, ok := d.GetOk("stop_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + stopAction := &ses.StopAction{ + Scope: aws.String(elem["scope"].(string)), + } + + if elem["topic_arn"] != "" { + stopAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + StopAction: stopAction, + } + } + } + + if v, ok := d.GetOk("workmail_action"); ok { + for _, element := range v.(*schema.Set).List() { + elem := element.(map[string]interface{}) + + workmailAction := &ses.WorkmailAction{ + OrganizationArn: aws.String(elem["organization_arn"].(string)), + } + + if elem["topic_arn"] != "" { + workmailAction.TopicArn = aws.String(elem["topic_arn"].(string)) + } + + actions[elem["position"].(int)] = &ses.ReceiptAction{ + WorkmailAction: workmailAction, + } + } + } + + var keys []int + for k := range actions { + keys = append(keys, k) + } + sort.Ints(keys) + + sortedActions := []*ses.ReceiptAction{} + for _, k := range keys { + sortedActions = append(sortedActions, actions[k]) + } + + receiptRule.Actions = sortedActions + + return receiptRule +} diff --git a/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go b/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go new file mode 100644 index 000000000..547835b37 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go @@ -0,0 +1,102 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsSesReceiptRuleSet() 
*schema.Resource {
+	return &schema.Resource{
+		Create: resourceAwsSesReceiptRuleSetCreate,
+		Read:   resourceAwsSesReceiptRuleSetRead,
+		Delete: resourceAwsSesReceiptRuleSetDelete,
+
+		Schema: map[string]*schema.Schema{
+			"rule_set_name": &schema.Schema{
+				Type:     schema.TypeString,
+				Required: true,
+				ForceNew: true,
+			},
+		},
+	}
+}
+
+func resourceAwsSesReceiptRuleSetCreate(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sesConn
+
+	ruleSetName := d.Get("rule_set_name").(string)
+
+	createOpts := &ses.CreateReceiptRuleSetInput{
+		RuleSetName: aws.String(ruleSetName),
+	}
+
+	_, err := conn.CreateReceiptRuleSet(createOpts)
+	if err != nil {
+		return fmt.Errorf("Error creating SES rule set: %s", err)
+	}
+
+	d.SetId(ruleSetName)
+
+	return resourceAwsSesReceiptRuleSetRead(d, meta)
+}
+
+func resourceAwsSesReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) error {
+	ruleSetExists, err := findRuleSet(d.Id(), nil, meta)
+	if err != nil {
+		return err
+	}
+
+	if !ruleSetExists {
+		log.Printf("[WARN] SES Receipt Rule Set (%s) not found", d.Id())
+		d.SetId("")
+	}
+
+	return nil
+}
+
+func resourceAwsSesReceiptRuleSetDelete(d *schema.ResourceData, meta interface{}) error {
+	conn := meta.(*AWSClient).sesConn
+
+	log.Printf("[DEBUG] SES Delete Receipt Rule Set: %s", d.Id())
+	_, err := conn.DeleteReceiptRuleSet(&ses.DeleteReceiptRuleSetInput{
+		RuleSetName: aws.String(d.Id()),
+	})
+
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func findRuleSet(name string, token *string, meta interface{}) (bool, error) {
+	conn := meta.(*AWSClient).sesConn
+
+	ruleSetExists := false
+
+	listOpts := &ses.ListReceiptRuleSetsInput{
+		NextToken: token,
+	}
+
+	response, err := conn.ListReceiptRuleSets(listOpts)
+	if err != nil {
+		return false, err
+	}
+
+	for _, element := range response.RuleSets {
+		if *element.Name == name {
+			ruleSetExists = true
+		}
+	}
+
+	// Keep paging until the rule set is found or the list is exhausted
+	if !ruleSetExists && response.NextToken != nil {
+		return findRuleSet(name, response.NextToken, meta)
+	}
+
+	return ruleSetExists, nil
+}
diff --git a/builtin/providers/aws/resource_aws_ses_receipt_rule_set_test.go b/builtin/providers/aws/resource_aws_ses_receipt_rule_set_test.go
new file mode 100644
index 000000000..8fe767cfc
--- /dev/null
+++ b/builtin/providers/aws/resource_aws_ses_receipt_rule_set_test.go
@@ -0,0 +1,91 @@
+package aws
+
+import (
+	"fmt"
+	"testing"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/service/ses"
+	"github.com/hashicorp/terraform/helper/resource"
+	"github.com/hashicorp/terraform/terraform"
+)
+
+func TestAccAWSSESReceiptRuleSet_basic(t *testing.T) {
+	resource.Test(t, resource.TestCase{
+		PreCheck: func() {
+			testAccPreCheck(t)
+		},
+		Providers:    testAccProviders,
+		CheckDestroy: testAccCheckSESReceiptRuleSetDestroy,
+		Steps: []resource.TestStep{
+			resource.TestStep{
+				Config: testAccAWSSESReceiptRuleSetConfig,
+				Check: resource.ComposeTestCheckFunc(
+					testAccCheckAwsSESReceiptRuleSetExists("aws_ses_receipt_rule_set.test"),
+				),
+			},
+		},
+	})
+}
+
+func testAccCheckSESReceiptRuleSetDestroy(s *terraform.State) error {
+	conn := testAccProvider.Meta().(*AWSClient).sesConn
+
+	for _, rs := range s.RootModule().Resources {
+		if rs.Type != "aws_ses_receipt_rule_set" {
+			continue
+		}
+
+		params := &ses.DescribeReceiptRuleSetInput{
+			RuleSetName: aws.String("just-a-test"),
+		}
+
+		_, err := conn.DescribeReceiptRuleSet(params)
+		if err == nil {
+			return fmt.Errorf("Receipt rule set %s still 
exists. Failing!", rs.Primary.ID) + } + + // Verify the error is what we want + _, ok := err.(awserr.Error) + if !ok { + return err + } + + } + + return nil + +} + +func testAccCheckAwsSESReceiptRuleSetExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Receipt Rule Set not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Receipt Rule Set name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + params := &ses.DescribeReceiptRuleSetInput{ + RuleSetName: aws.String("just-a-test"), + } + + _, err := conn.DescribeReceiptRuleSet(params) + if err != nil { + return err + } + + return nil + } +} + +const testAccAWSSESReceiptRuleSetConfig = ` +resource "aws_ses_receipt_rule_set" "test" { + rule_set_name = "just-a-test" +} +` diff --git a/builtin/providers/aws/resource_aws_ses_receipt_rule_test.go b/builtin/providers/aws/resource_aws_ses_receipt_rule_test.go new file mode 100644 index 000000000..4c24a4976 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ses_receipt_rule_test.go @@ -0,0 +1,292 @@ +package aws + +import ( + "fmt" + "reflect" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ses" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSSESReceiptRule_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptRuleBasicConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESReceiptRuleExists("aws_ses_receipt_rule.basic"), + ), + }, + }, + }) +} + +func TestAccAWSSESReceiptRule_order(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptRuleOrderConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESReceiptRuleOrder("aws_ses_receipt_rule.second"), + ), + }, + }, + }) +} + +func TestAccAWSSESReceiptRule_actions(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptRuleActionsConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsSESReceiptRuleActions("aws_ses_receipt_rule.actions"), + ), + }, + }, + }) +} + +func testAccCheckSESReceiptRuleDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).sesConn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ses_receipt_rule" { + continue + } + + params := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String(rs.Primary.Attributes["name"]), + RuleSetName: aws.String(rs.Primary.Attributes["rule_set_name"]), + } + + _, err := conn.DescribeReceiptRule(params) + if err == nil { + return fmt.Errorf("Receipt rule %s still exists. 
Failing!", rs.Primary.ID) + } + + // Verify the error is what we want + _, ok := err.(awserr.Error) + if !ok { + return err + } + + } + + return nil + +} + +func testAccCheckAwsSESReceiptRuleExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Receipt Rule not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Receipt Rule name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + params := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String("basic"), + RuleSetName: aws.String("test-me"), + } + + response, err := conn.DescribeReceiptRule(params) + if err != nil { + return err + } + + if !*response.Rule.Enabled { + return fmt.Errorf("Enabled (%s) was not set to true", *response.Rule.Enabled) + } + + if !reflect.DeepEqual(response.Rule.Recipients, []*string{aws.String("test@example.com")}) { + return fmt.Errorf("Recipients (%v) was not set to [test@example.com]", response.Rule.Recipients) + } + + if !*response.Rule.ScanEnabled { + return fmt.Errorf("ScanEnabled (%s) was not set to true", *response.Rule.ScanEnabled) + } + + if *response.Rule.TlsPolicy != "Require" { + return fmt.Errorf("TLS Policy (%s) was not set to Require", *response.Rule.TlsPolicy) + } + + return nil + } +} + +func testAccCheckAwsSESReceiptRuleOrder(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Receipt Rule not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Receipt Rule name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + params := &ses.DescribeReceiptRuleSetInput{ + RuleSetName: aws.String("test-me"), + } + + response, err := conn.DescribeReceiptRuleSet(params) + if err != nil { + return err + } + + if len(response.Rules) != 2 { + return fmt.Errorf("Number of rules (%s) was not equal to 2", len(response.Rules)) + } else if *response.Rules[0].Name != "first" || *response.Rules[1].Name != "second" { + return fmt.Errorf("Order of rules (%v) was incorrect", response.Rules) + } + + return nil + } +} + +func testAccCheckAwsSESReceiptRuleActions(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("SES Receipt Rule not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("SES Receipt Rule name not set") + } + + conn := testAccProvider.Meta().(*AWSClient).sesConn + + params := &ses.DescribeReceiptRuleInput{ + RuleName: aws.String("actions"), + RuleSetName: aws.String("test-me"), + } + + response, err := conn.DescribeReceiptRule(params) + if err != nil { + return err + } + + actions := response.Rule.Actions + + if len(actions) != 3 { + return fmt.Errorf("Number of rules (%d) was not equal to 3", len(actions)) + } + + addHeaderAction := actions[0].AddHeaderAction + if *addHeaderAction.HeaderName != "Another-Header" { + return fmt.Errorf("Header Name (%s) was not equal to Another-Header", *addHeaderAction.HeaderName) + } + + if *addHeaderAction.HeaderValue != "First" { + return fmt.Errorf("Header Value (%s) was not equal to First", *addHeaderAction.HeaderValue) + } + + secondAddHeaderAction := actions[1].AddHeaderAction + if *secondAddHeaderAction.HeaderName != "Added-By" { + return fmt.Errorf("Header Name (%s) was not equal to Added-By", *secondAddHeaderAction.HeaderName) + } + + if *secondAddHeaderAction.HeaderValue 
!= "Terraform" { + return fmt.Errorf("Header Value (%s) was not equal to Terraform", *secondAddHeaderAction.HeaderValue) + } + + stopAction := actions[2].StopAction + if *stopAction.Scope != "RuleSet" { + return fmt.Errorf("Scope (%s) was not equal to RuleSet", *stopAction.Scope) + } + + return nil + } +} + +const testAccAWSSESReceiptRuleBasicConfig = ` +resource "aws_ses_receipt_rule_set" "test" { + rule_set_name = "test-me" +} + +resource "aws_ses_receipt_rule" "basic" { + name = "basic" + rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}" + recipients = ["test@example.com"] + enabled = true + scan_enabled = true + tls_policy = "Require" +} +` + +const testAccAWSSESReceiptRuleOrderConfig = ` +resource "aws_ses_receipt_rule_set" "test" { + rule_set_name = "test-me" +} + +resource "aws_ses_receipt_rule" "second" { + name = "second" + rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}" + after = "${aws_ses_receipt_rule.first.name}" +} + +resource "aws_ses_receipt_rule" "first" { + name = "first" + rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}" +} +` + +const testAccAWSSESReceiptRuleActionsConfig = ` +resource "aws_s3_bucket" "emails" { + bucket = "ses-terraform-emails" +} + +resource "aws_ses_receipt_rule_set" "test" { + rule_set_name = "test-me" +} + +resource "aws_ses_receipt_rule" "actions" { + name = "actions" + rule_set_name = "${aws_ses_receipt_rule_set.test.rule_set_name}" + + add_header_action { + header_name = "Added-By" + header_value = "Terraform" + position = 1 + } + + add_header_action { + header_name = "Another-Header" + header_value = "First" + position = 0 + } + + stop_action { + scope = "RuleSet" + position = 2 + } +} +` diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go new file mode 100644 index 000000000..814640917 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -0,0 +1,3984 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package ses provides a client for Amazon Simple Email Service. +package ses + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opCloneReceiptRuleSet = "CloneReceiptRuleSet" + +// CloneReceiptRuleSetRequest generates a request for the CloneReceiptRuleSet operation. +func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req *request.Request, output *CloneReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCloneReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CloneReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CloneReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates a receipt rule set by cloning an existing one. All receipt rules +// and configurations are copied to the new receipt rule set and are completely +// independent of the source rule set. +// +// For information about setting up rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. 
+func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceiptRuleSetOutput, error) { + req, out := c.CloneReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptFilter = "CreateReceiptFilter" + +// CreateReceiptFilterRequest generates a request for the CreateReceiptFilter operation. +func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req *request.Request, output *CreateReceiptFilterOutput) { + op := &request.Operation{ + Name: opCreateReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptFilterOutput{} + req.Data = output + return +} + +// Creates a new IP address filter. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptFilter(input *CreateReceiptFilterInput) (*CreateReceiptFilterOutput, error) { + req, out := c.CreateReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRule = "CreateReceiptRule" + +// CreateReceiptRuleRequest generates a request for the CreateReceiptRule operation. +func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *request.Request, output *CreateReceiptRuleOutput) { + op := &request.Operation{ + Name: opCreateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleOutput{} + req.Data = output + return +} + +// Creates a receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRuleOutput, error) { + req, out := c.CreateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opCreateReceiptRuleSet = "CreateReceiptRuleSet" + +// CreateReceiptRuleSetRequest generates a request for the CreateReceiptRuleSet operation. +func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req *request.Request, output *CreateReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opCreateReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateReceiptRuleSetOutput{} + req.Data = output + return +} + +// Creates an empty receipt rule set. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateReceiptRuleSetOutput, error) { + req, out := c.CreateReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentity = "DeleteIdentity" + +// DeleteIdentityRequest generates a request for the DeleteIdentity operation. 
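+//
+// The *Request methods return the request object before it is sent; the plain
+// wrappers in this file are built on exactly that pattern. A minimal sketch of
+// the same pattern from a caller's point of view (svc is assumed to be an
+// initialized *SES client, and the Identity field follows the SES
+// DeleteIdentity API; it is not defined in this excerpt):
+//
+//	req, out := svc.DeleteIdentityRequest(&ses.DeleteIdentityInput{
+//		Identity: aws.String("example.com"),
+//	})
+//	if err := req.Send(); err != nil {
+//		// handle the error; out is only populated after a successful Send
+//	}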
+func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Request, output *DeleteIdentityOutput) { + op := &request.Operation{ + Name: opDeleteIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityOutput{} + req.Data = output + return +} + +// Deletes the specified identity (email address or domain) from the list of +// verified identities. +// +// This action is throttled at one request per second. +func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, error) { + req, out := c.DeleteIdentityRequest(input) + err := req.Send() + return out, err +} + +const opDeleteIdentityPolicy = "DeleteIdentityPolicy" + +// DeleteIdentityPolicyRequest generates a request for the DeleteIdentityPolicy operation. +func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req *request.Request, output *DeleteIdentityPolicyOutput) { + op := &request.Operation{ + Name: opDeleteIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteIdentityPolicyOutput{} + req.Data = output + return +} + +// Deletes the specified sending authorization policy for the given identity +// (email address or domain). This API returns successfully even if a policy +// with the specified name does not exist. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIdentityPolicyOutput, error) { + req, out := c.DeleteIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptFilter = "DeleteReceiptFilter" + +// DeleteReceiptFilterRequest generates a request for the DeleteReceiptFilter operation. +func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req *request.Request, output *DeleteReceiptFilterOutput) { + op := &request.Operation{ + Name: opDeleteReceiptFilter, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptFilterInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptFilterOutput{} + req.Data = output + return +} + +// Deletes the specified IP address filter. +// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptFilter(input *DeleteReceiptFilterInput) (*DeleteReceiptFilterOutput, error) { + req, out := c.DeleteReceiptFilterRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRule = "DeleteReceiptRule" + +// DeleteReceiptRuleRequest generates a request for the DeleteReceiptRule operation. 
+func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *request.Request, output *DeleteReceiptRuleOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRule(input *DeleteReceiptRuleInput) (*DeleteReceiptRuleOutput, error) { + req, out := c.DeleteReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDeleteReceiptRuleSet = "DeleteReceiptRuleSet" + +// DeleteReceiptRuleSetRequest generates a request for the DeleteReceiptRuleSet operation. +func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req *request.Request, output *DeleteReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDeleteReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DeleteReceiptRuleSetOutput{} + req.Data = output + return +} + +// Deletes the specified receipt rule set and all of the receipt rules it contains. +// +// The currently active rule set cannot be deleted. For information about managing +// receipt rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteReceiptRuleSetOutput, error) { + req, out := c.DeleteReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" + +// DeleteVerifiedEmailAddressRequest generates a request for the DeleteVerifiedEmailAddress operation. +func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddressInput) (req *request.Request, output *DeleteVerifiedEmailAddressOutput) { + op := &request.Operation{ + Name: opDeleteVerifiedEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteVerifiedEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteVerifiedEmailAddressOutput{} + req.Data = output + return +} + +// Deletes the specified email address from the list of verified addresses. +// +// The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 +// release of Domain Verification. The DeleteIdentity action is now preferred. +// This action is throttled at one request per second. +func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) (*DeleteVerifiedEmailAddressOutput, error) { + req, out := c.DeleteVerifiedEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opDescribeActiveReceiptRuleSet = "DescribeActiveReceiptRuleSet" + +// DescribeActiveReceiptRuleSetRequest generates a request for the DescribeActiveReceiptRuleSet operation. 
+func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRuleSetInput) (req *request.Request, output *DescribeActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the metadata and receipt rules for the receipt rule set that is currently +// active. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeActiveReceiptRuleSet(input *DescribeActiveReceiptRuleSetInput) (*DescribeActiveReceiptRuleSetOutput, error) { + req, out := c.DescribeActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRule = "DescribeReceiptRule" + +// DescribeReceiptRuleRequest generates a request for the DescribeReceiptRule operation. +func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req *request.Request, output *DescribeReceiptRuleOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRule(input *DescribeReceiptRuleInput) (*DescribeReceiptRuleOutput, error) { + req, out := c.DescribeReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opDescribeReceiptRuleSet = "DescribeReceiptRuleSet" + +// DescribeReceiptRuleSetRequest generates a request for the DescribeReceiptRuleSet operation. +func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) (req *request.Request, output *DescribeReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opDescribeReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeReceiptRuleSetOutput{} + req.Data = output + return +} + +// Returns the details of the specified receipt rule set. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) DescribeReceiptRuleSet(input *DescribeReceiptRuleSetInput) (*DescribeReceiptRuleSetOutput, error) { + req, out := c.DescribeReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityDkimAttributes = "GetIdentityDkimAttributes" + +// GetIdentityDkimAttributesRequest generates a request for the GetIdentityDkimAttributes operation. 
+func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesInput) (req *request.Request, output *GetIdentityDkimAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityDkimAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityDkimAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityDkimAttributesOutput{} + req.Data = output + return +} + +// Returns the current status of Easy DKIM signing for an entity. For domain +// name identities, this action also returns the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES has successfully verified that +// these tokens have been published. +// +// This action takes a list of identities as input and returns the following +// information for each: +// +// Whether Easy DKIM signing is enabled or disabled. A set of DKIM tokens +// that represent the identity. If the identity is an email address, the tokens +// represent the domain of that address. Whether Amazon SES has successfully +// verified the DKIM tokens published in the domain's DNS. This information +// is only returned for domain name identities, not for email addresses. This +// action is throttled at one request per second and can only get DKIM attributes +// for up to 100 identities at a time. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). +func (c *SES) GetIdentityDkimAttributes(input *GetIdentityDkimAttributesInput) (*GetIdentityDkimAttributesOutput, error) { + req, out := c.GetIdentityDkimAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityNotificationAttributes = "GetIdentityNotificationAttributes" + +// GetIdentityNotificationAttributesRequest generates a request for the GetIdentityNotificationAttributes operation. +func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotificationAttributesInput) (req *request.Request, output *GetIdentityNotificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityNotificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityNotificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityNotificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of verified identities (email addresses and/or domains), returns +// a structure describing identity notification attributes. +// +// This action is throttled at one request per second and can only get notification +// attributes for up to 100 identities at a time. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) GetIdentityNotificationAttributes(input *GetIdentityNotificationAttributesInput) (*GetIdentityNotificationAttributesOutput, error) { + req, out := c.GetIdentityNotificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityPolicies = "GetIdentityPolicies" + +// GetIdentityPoliciesRequest generates a request for the GetIdentityPolicies operation. 
+func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req *request.Request, output *GetIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opGetIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns the requested sending authorization policies for the given identity +// (email address or domain). The policies are returned as a map of policy names +// to policy contents. You can retrieve a maximum of 20 policies at a time. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentityPoliciesOutput, error) { + req, out := c.GetIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opGetIdentityVerificationAttributes = "GetIdentityVerificationAttributes" + +// GetIdentityVerificationAttributesRequest generates a request for the GetIdentityVerificationAttributes operation. +func (c *SES) GetIdentityVerificationAttributesRequest(input *GetIdentityVerificationAttributesInput) (req *request.Request, output *GetIdentityVerificationAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityVerificationAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityVerificationAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityVerificationAttributesOutput{} + req.Data = output + return +} + +// Given a list of identities (email addresses and/or domains), returns the +// verification status and (for domain identities) the verification token for +// each identity. +// +// This action is throttled at one request per second and can only get verification +// attributes for up to 100 identities at a time. +func (c *SES) GetIdentityVerificationAttributes(input *GetIdentityVerificationAttributesInput) (*GetIdentityVerificationAttributesOutput, error) { + req, out := c.GetIdentityVerificationAttributesRequest(input) + err := req.Send() + return out, err +} + +const opGetSendQuota = "GetSendQuota" + +// GetSendQuotaRequest generates a request for the GetSendQuota operation. +func (c *SES) GetSendQuotaRequest(input *GetSendQuotaInput) (req *request.Request, output *GetSendQuotaOutput) { + op := &request.Operation{ + Name: opGetSendQuota, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendQuotaInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendQuotaOutput{} + req.Data = output + return +} + +// Returns the user's current sending limits. +// +// This action is throttled at one request per second. +func (c *SES) GetSendQuota(input *GetSendQuotaInput) (*GetSendQuotaOutput, error) { + req, out := c.GetSendQuotaRequest(input) + err := req.Send() + return out, err +} + +const opGetSendStatistics = "GetSendStatistics" + +// GetSendStatisticsRequest generates a request for the GetSendStatistics operation. 
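+//
+// A minimal usage sketch (svc is assumed to be an initialized *SES client;
+// the fmt and aws imports as well as the SendDataPoints, Timestamp, and
+// DeliveryAttempts field names follow the SES GetSendStatistics API and are
+// not defined in this excerpt):
+//
+//	out, err := svc.GetSendStatistics(&ses.GetSendStatisticsInput{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, point := range out.SendDataPoints {
+//		fmt.Printf("%v: %d delivery attempts\n", *point.Timestamp, *point.DeliveryAttempts)
+//	}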
+func (c *SES) GetSendStatisticsRequest(input *GetSendStatisticsInput) (req *request.Request, output *GetSendStatisticsOutput) { + op := &request.Operation{ + Name: opGetSendStatistics, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSendStatisticsInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSendStatisticsOutput{} + req.Data = output + return +} + +// Returns the user's sending statistics. The result is a list of data points, +// representing the last two weeks of sending activity. +// +// Each data point in the list contains statistics for a 15-minute interval. +// +// This action is throttled at one request per second. +func (c *SES) GetSendStatistics(input *GetSendStatisticsInput) (*GetSendStatisticsOutput, error) { + req, out := c.GetSendStatisticsRequest(input) + err := req.Send() + return out, err +} + +const opListIdentities = "ListIdentities" + +// ListIdentitiesRequest generates a request for the ListIdentities operation. +func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { + op := &request.Operation{ + Name: opListIdentities, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxItems", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListIdentitiesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentitiesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the identities (email addresses and domains) +// for a specific AWS Account, regardless of verification status. +// +// This action is throttled at one request per second. +func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { + req, out := c.ListIdentitiesRequest(input) + err := req.Send() + return out, err +} + +func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(p *ListIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListIdentitiesRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListIdentitiesOutput), lastPage) + }) +} + +const opListIdentityPolicies = "ListIdentityPolicies" + +// ListIdentityPoliciesRequest generates a request for the ListIdentityPolicies operation. +func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req *request.Request, output *ListIdentityPoliciesOutput) { + op := &request.Operation{ + Name: opListIdentityPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIdentityPoliciesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIdentityPoliciesOutput{} + req.Data = output + return +} + +// Returns a list of sending authorization policies that are attached to the +// given identity (email address or domain). This API returns only a list. If +// you want the actual policy content, you can use GetIdentityPolicies. +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). 
+// +// This action is throttled at one request per second. +func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdentityPoliciesOutput, error) { + req, out := c.ListIdentityPoliciesRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptFilters = "ListReceiptFilters" + +// ListReceiptFiltersRequest generates a request for the ListReceiptFilters operation. +func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *request.Request, output *ListReceiptFiltersOutput) { + op := &request.Operation{ + Name: opListReceiptFilters, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptFiltersInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptFiltersOutput{} + req.Data = output + return +} + +// Lists the IP address filters associated with your account. +// +// For information about managing IP address filters, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). +// +// This action is throttled at one request per second. +func (c *SES) ListReceiptFilters(input *ListReceiptFiltersInput) (*ListReceiptFiltersOutput, error) { + req, out := c.ListReceiptFiltersRequest(input) + err := req.Send() + return out, err +} + +const opListReceiptRuleSets = "ListReceiptRuleSets" + +// ListReceiptRuleSetsRequest generates a request for the ListReceiptRuleSets operation. +func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req *request.Request, output *ListReceiptRuleSetsOutput) { + op := &request.Operation{ + Name: opListReceiptRuleSets, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListReceiptRuleSetsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListReceiptRuleSetsOutput{} + req.Data = output + return +} + +// Lists the receipt rule sets that exist under your AWS account. If there are +// additional receipt rule sets to be retrieved, you will receive a NextToken +// that you can provide to the next call to ListReceiptRuleSets to retrieve +// the additional entries. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) ListReceiptRuleSets(input *ListReceiptRuleSetsInput) (*ListReceiptRuleSetsOutput, error) { + req, out := c.ListReceiptRuleSetsRequest(input) + err := req.Send() + return out, err +} + +const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" + +// ListVerifiedEmailAddressesRequest generates a request for the ListVerifiedEmailAddresses operation. +func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddressesInput) (req *request.Request, output *ListVerifiedEmailAddressesOutput) { + op := &request.Operation{ + Name: opListVerifiedEmailAddresses, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListVerifiedEmailAddressesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListVerifiedEmailAddressesOutput{} + req.Data = output + return +} + +// Returns a list containing all of the email addresses that have been verified. +// +// The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 +// release of Domain Verification. The ListIdentities action is now preferred. +// This action is throttled at one request per second. 
+func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) (*ListVerifiedEmailAddressesOutput, error) { + req, out := c.ListVerifiedEmailAddressesRequest(input) + err := req.Send() + return out, err +} + +const opPutIdentityPolicy = "PutIdentityPolicy" + +// PutIdentityPolicyRequest generates a request for the PutIdentityPolicy operation. +func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *request.Request, output *PutIdentityPolicyOutput) { + op := &request.Operation{ + Name: opPutIdentityPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutIdentityPolicyInput{} + } + + req = c.newRequest(op, input, output) + output = &PutIdentityPolicyOutput{} + req.Data = output + return +} + +// Adds or updates a sending authorization policy for the specified identity +// (email address or domain). +// +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. Sending authorization is a feature that enables +// an identity owner to authorize other senders to use its identities. For information +// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// +// This action is throttled at one request per second. +func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPolicyOutput, error) { + req, out := c.PutIdentityPolicyRequest(input) + err := req.Send() + return out, err +} + +const opReorderReceiptRuleSet = "ReorderReceiptRuleSet" + +// ReorderReceiptRuleSetRequest generates a request for the ReorderReceiptRuleSet operation. +func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (req *request.Request, output *ReorderReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opReorderReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ReorderReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &ReorderReceiptRuleSetOutput{} + req.Data = output + return +} + +// Reorders the receipt rules within a receipt rule set. +// +// All of the rules in the rule set must be represented in this request. That +// is, this API will return an error if the reorder request doesn’t explicitly +// position all of the rules. For information about managing receipt rule sets, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*ReorderReceiptRuleSetOutput, error) { + req, out := c.ReorderReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSendBounce = "SendBounce" + +// SendBounceRequest generates a request for the SendBounce operation. +func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, output *SendBounceOutput) { + op := &request.Operation{ + Name: opSendBounce, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendBounceInput{} + } + + req = c.newRequest(op, input, output) + output = &SendBounceOutput{} + req.Data = output + return +} + +// Generates and sends a bounce message to the sender of an email you received +// through Amazon SES. You can only use this API on an email up to 24 hours +// after you receive it. 
+// +// You cannot use this API to send generic bounces for mail that was not received +// by Amazon SES. For information about receiving email through Amazon SES, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// +// This action is throttled at one request per second. +func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { + req, out := c.SendBounceRequest(input) + err := req.Send() + return out, err +} + +const opSendEmail = "SendEmail" + +// SendEmailRequest generates a request for the SendEmail operation. +func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { + op := &request.Operation{ + Name: opSendEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendEmailOutput{} + req.Data = output + return +} + +// Composes an email message based on input data, and then immediately queues +// the message for sending. +// +// There are several important points to know about SendEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. Amazon SES has a limit on the total number +// of recipients per message. The combined number of To:, CC: and BCC: email +// addresses cannot exceed 50. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call Amazon SES repeatedly to send the message to each group. For +// every message that you send, the total number of recipients (To:, CC: and +// BCC:) is counted against your sending quota - the maximum number of emails +// you can send in a 24-hour period. For information about your sending quota, +// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { + req, out := c.SendEmailRequest(input) + err := req.Send() + return out, err +} + +const opSendRawEmail = "SendRawEmail" + +// SendRawEmailRequest generates a request for the SendRawEmail operation. +func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Request, output *SendRawEmailOutput) { + op := &request.Operation{ + Name: opSendRawEmail, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SendRawEmailInput{} + } + + req = c.newRequest(op, input, output) + output = &SendRawEmailOutput{} + req.Data = output + return +} + +// Sends an email message, with header and content specified by the client. +// The SendRawEmail action is useful for sending multipart MIME emails. The +// raw text of the message must comply with Internet email standards; otherwise, +// the message cannot be sent. 
+// +// There are several important points to know about SendRawEmail: +// +// You can only send email from verified email addresses and domains; otherwise, +// you will get an "Email address not verified" error. If your account is still +// in the Amazon SES sandbox, you must also verify every recipient email address +// except for the recipients provided by the Amazon SES mailbox simulator. For +// more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. Amazon SES has a limit on the total number +// of recipients per message. The combined number of To:, CC: and BCC: email +// addresses cannot exceed 50. If you need to send an email message to a larger +// audience, you can divide your recipient list into groups of 50 or fewer, +// and then call Amazon SES repeatedly to send the message to each group. The +// To:, CC:, and BCC: headers in the raw message can contain a group list. Note +// that each recipient in a group list counts towards the 50-recipient limit. +// For every message that you send, the total number of recipients (To:, CC: +// and BCC:) is counted against your sending quota - the maximum number of emails +// you can send in a 24-hour period. For information about your sending quota, +// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +// If you are using sending authorization to send on behalf of another user, +// SendRawEmail enables you to specify the cross-account identity for the email's +// "Source," "From," and "Return-Path" parameters in one of two ways: you can +// pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the +// API, or you can include the following X-headers in the header of your raw +// email: X-SES-SOURCE-ARN X-SES-FROM-ARN X-SES-RETURN-PATH-ARN Do not include +// these X-headers in the DKIM signature, because they are removed by Amazon +// SES before sending the email. For the most common sending authorization use +// case, we recommend that you specify the SourceIdentityArn and do not specify +// either the FromIdentityArn or ReturnPathIdentityArn. (The same note applies +// to the corresponding X-headers.) If you only specify the SourceIdentityArn, +// Amazon SES will simply set the "From" address and the "Return Path" address +// to the identity specified in SourceIdentityArn. For more information about +// sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error) { + req, out := c.SendRawEmailRequest(input) + err := req.Send() + return out, err +} + +const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" + +// SetActiveReceiptRuleSetRequest generates a request for the SetActiveReceiptRuleSet operation. 
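+//
+// Like every *Request method in this file, the returned request is not sent until
+// Send is called. A hedged sketch (svc is an assumed *ses.SES client; the
+// RuleSetName field is implied by the operation's documentation below):
+//
+//    req, out := svc.SetActiveReceiptRuleSetRequest(&ses.SetActiveReceiptRuleSetInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//    })
+//    if err := req.Send(); err != nil {
+//        // handle the error
+//    }
+//    _ = out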
+func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput) (req *request.Request, output *SetActiveReceiptRuleSetOutput) { + op := &request.Operation{ + Name: opSetActiveReceiptRuleSet, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetActiveReceiptRuleSetInput{} + } + + req = c.newRequest(op, input, output) + output = &SetActiveReceiptRuleSetOutput{} + req.Data = output + return +} + +// Sets the specified receipt rule set as the active receipt rule set. +// +// To disable your email-receiving through Amazon SES completely, you can call +// this API with RuleSetName set to null. For information about managing receipt +// rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// +// This action is throttled at one request per second. +func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*SetActiveReceiptRuleSetOutput, error) { + req, out := c.SetActiveReceiptRuleSetRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityDkimEnabled = "SetIdentityDkimEnabled" + +// SetIdentityDkimEnabledRequest generates a request for the SetIdentityDkimEnabled operation. +func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) (req *request.Request, output *SetIdentityDkimEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityDkimEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityDkimEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityDkimEnabledOutput{} + req.Data = output + return +} + +// Enables or disables Easy DKIM signing of email sent from an identity: +// +// If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), +// then Amazon SES will DKIM-sign all email sent by addresses under that domain +// name (e.g., user@example.com). If Easy DKIM signing is enabled for an email +// address, then Amazon SES will DKIM-sign all email sent by that email address. +// For email addresses (e.g., user@example.com), you can only enable Easy DKIM +// signing if the corresponding domain (e.g., example.com) has been set up for +// Easy DKIM using the AWS Console or the VerifyDomainDkim action. +// +// This action is throttled at one request per second. +// +// For more information about Easy DKIM signing, go to the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). +func (c *SES) SetIdentityDkimEnabled(input *SetIdentityDkimEnabledInput) (*SetIdentityDkimEnabledOutput, error) { + req, out := c.SetIdentityDkimEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityFeedbackForwardingEnabled = "SetIdentityFeedbackForwardingEnabled" + +// SetIdentityFeedbackForwardingEnabledRequest generates a request for the SetIdentityFeedbackForwardingEnabled operation. 
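+//
+// A hedged sketch of the request form (svc is an assumed *ses.SES client; the
+// Identity and ForwardingEnabled field names are inferred from the operation and
+// are not shown in this excerpt):
+//
+//    input := &ses.SetIdentityFeedbackForwardingEnabledInput{
+//        Identity:          aws.String("example.com"),
+//        ForwardingEnabled: aws.Bool(false),
+//    }
+//    req, out := svc.SetIdentityFeedbackForwardingEnabledRequest(input)
+//    if err := req.Send(); err != nil {
+//        // handle the error
+//    }
+//    _ = out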
+func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeedbackForwardingEnabledInput) (req *request.Request, output *SetIdentityFeedbackForwardingEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityFeedbackForwardingEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityFeedbackForwardingEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityFeedbackForwardingEnabledOutput{} + req.Data = output + return +} + +// Given an identity (email address or domain), enables or disables whether +// Amazon SES forwards bounce and complaint notifications as email. Feedback +// forwarding can only be disabled when Amazon Simple Notification Service (Amazon +// SNS) topics are specified for both bounces and complaints. +// +// Feedback forwarding does not apply to delivery notifications. Delivery notifications +// are only available through Amazon SNS. This action is throttled at one request +// per second. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityFeedbackForwardingEnabled(input *SetIdentityFeedbackForwardingEnabledInput) (*SetIdentityFeedbackForwardingEnabledOutput, error) { + req, out := c.SetIdentityFeedbackForwardingEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityNotificationTopic = "SetIdentityNotificationTopic" + +// SetIdentityNotificationTopicRequest generates a request for the SetIdentityNotificationTopic operation. +func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotificationTopicInput) (req *request.Request, output *SetIdentityNotificationTopicOutput) { + op := &request.Operation{ + Name: opSetIdentityNotificationTopic, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityNotificationTopicInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityNotificationTopicOutput{} + req.Data = output + return +} + +// Given an identity (email address or domain), sets the Amazon Simple Notification +// Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, +// and/or delivery notifications for emails sent with that identity as the Source. +// +// Unless feedback forwarding is enabled, you must specify Amazon SNS topics +// for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled. +// This action is throttled at one request per second. +// +// For more information about feedback notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityNotificationTopic(input *SetIdentityNotificationTopicInput) (*SetIdentityNotificationTopicOutput, error) { + req, out := c.SetIdentityNotificationTopicRequest(input) + err := req.Send() + return out, err +} + +const opSetReceiptRulePosition = "SetReceiptRulePosition" + +// SetReceiptRulePositionRequest generates a request for the SetReceiptRulePosition operation. 
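+//
+// A hedged sketch of the request form (svc is an assumed *ses.SES client; the
+// RuleSetName, RuleName, and After field names are inferred from the operation
+// and are not shown in this excerpt):
+//
+//    req, out := svc.SetReceiptRulePositionRequest(&ses.SetReceiptRulePositionInput{
+//        RuleSetName: aws.String("my-rule-set"),
+//        RuleName:    aws.String("forward-to-s3"),
+//        After:       aws.String("drop-spam"),
+//    })
+//    if err := req.Send(); err != nil {
+//        // handle the error
+//    }
+//    _ = out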
+func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) (req *request.Request, output *SetReceiptRulePositionOutput) { + op := &request.Operation{ + Name: opSetReceiptRulePosition, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetReceiptRulePositionInput{} + } + + req = c.newRequest(op, input, output) + output = &SetReceiptRulePositionOutput{} + req.Data = output + return +} + +// Sets the position of the specified receipt rule in the receipt rule set. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) SetReceiptRulePosition(input *SetReceiptRulePositionInput) (*SetReceiptRulePositionOutput, error) { + req, out := c.SetReceiptRulePositionRequest(input) + err := req.Send() + return out, err +} + +const opUpdateReceiptRule = "UpdateReceiptRule" + +// UpdateReceiptRuleRequest generates a request for the UpdateReceiptRule operation. +func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *request.Request, output *UpdateReceiptRuleOutput) { + op := &request.Operation{ + Name: opUpdateReceiptRule, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateReceiptRuleInput{} + } + + req = c.newRequest(op, input, output) + output = &UpdateReceiptRuleOutput{} + req.Data = output + return +} + +// Updates a receipt rule. +// +// For information about managing receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rules.html). +// +// This action is throttled at one request per second. +func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRuleOutput, error) { + req, out := c.UpdateReceiptRuleRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainDkim = "VerifyDomainDkim" + +// VerifyDomainDkimRequest generates a request for the VerifyDomainDkim operation. +func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { + op := &request.Operation{ + Name: opVerifyDomainDkim, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainDkimInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainDkimOutput{} + req.Data = output + return +} + +// Returns a set of DKIM tokens for a domain. DKIM tokens are character strings +// that represent your domain's identity. Using these tokens, you will need +// to create DNS CNAME records that point to DKIM public keys hosted by Amazon +// SES. Amazon Web Services will eventually detect that you have updated your +// DNS records; this detection process may take up to 72 hours. Upon successful +// detection, Amazon SES will be able to DKIM-sign email originating from that +// domain. +// +// This action is throttled at one request per second. +// +// To enable or disable Easy DKIM signing for a domain, use the SetIdentityDkimEnabled +// action. +// +// For more information about creating DNS records using DKIM tokens, go to +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). 
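+//
+// A hedged usage sketch (svc is an assumed *ses.SES client; the Domain and
+// DkimTokens field names are inferred from the operation and are not shown in
+// this excerpt):
+//
+//    out, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
+//        Domain: aws.String("example.com"),
+//    })
+//    if err != nil {
+//        // handle the error
+//    }
+//    for _, token := range out.DkimTokens {
+//        // each token typically becomes a CNAME record such as
+//        // <token>._domainkey.example.com -> <token>.dkim.amazonses.com
+//        _ = token
+//    }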
+func (c *SES) VerifyDomainDkim(input *VerifyDomainDkimInput) (*VerifyDomainDkimOutput, error) { + req, out := c.VerifyDomainDkimRequest(input) + err := req.Send() + return out, err +} + +const opVerifyDomainIdentity = "VerifyDomainIdentity" + +// VerifyDomainIdentityRequest generates a request for the VerifyDomainIdentity operation. +func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req *request.Request, output *VerifyDomainIdentityOutput) { + op := &request.Operation{ + Name: opVerifyDomainIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyDomainIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyDomainIdentityOutput{} + req.Data = output + return +} + +// Verifies a domain. +// +// This action is throttled at one request per second. +func (c *SES) VerifyDomainIdentity(input *VerifyDomainIdentityInput) (*VerifyDomainIdentityOutput, error) { + req, out := c.VerifyDomainIdentityRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailAddress = "VerifyEmailAddress" + +// VerifyEmailAddressRequest generates a request for the VerifyEmailAddress operation. +func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *request.Request, output *VerifyEmailAddressOutput) { + op := &request.Operation{ + Name: opVerifyEmailAddress, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailAddressInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &VerifyEmailAddressOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// The VerifyEmailAddress action is deprecated as of the May 15, 2012 release +// of Domain Verification. The VerifyEmailIdentity action is now preferred. +// This action is throttled at one request per second. +func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAddressOutput, error) { + req, out := c.VerifyEmailAddressRequest(input) + err := req.Send() + return out, err +} + +const opVerifyEmailIdentity = "VerifyEmailIdentity" + +// VerifyEmailIdentityRequest generates a request for the VerifyEmailIdentity operation. +func (c *SES) VerifyEmailIdentityRequest(input *VerifyEmailIdentityInput) (req *request.Request, output *VerifyEmailIdentityOutput) { + op := &request.Operation{ + Name: opVerifyEmailIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &VerifyEmailIdentityInput{} + } + + req = c.newRequest(op, input, output) + output = &VerifyEmailIdentityOutput{} + req.Data = output + return +} + +// Verifies an email address. This action causes a confirmation email message +// to be sent to the specified address. +// +// This action is throttled at one request per second. +func (c *SES) VerifyEmailIdentity(input *VerifyEmailIdentityInput) (*VerifyEmailIdentityOutput, error) { + req, out := c.VerifyEmailIdentityRequest(input) + err := req.Send() + return out, err +} + +// When included in a receipt rule, this action adds a header to the received +// email. +// +// For information about adding a header using a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-add-header.html). 
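+//
+// A hedged construction sketch, using the field names defined on the structures
+// in this file (the header name and value are illustrative only):
+//
+//    action := &ses.ReceiptAction{
+//        AddHeaderAction: &ses.AddHeaderAction{
+//            HeaderName:  aws.String("X-Processed-By"),
+//            HeaderValue: aws.String("my-mail-pipeline"),
+//        },
+//    }
+//    _ = action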
+type AddHeaderAction struct { + _ struct{} `type:"structure"` + + // The name of the header to add. Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + HeaderName *string `type:"string" required:"true"` + + // Must be less than 2048 characters, and must not contain newline characters + // ("\r" or "\n"). + HeaderValue *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AddHeaderAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddHeaderAction) GoString() string { + return s.String() +} + +// Represents the body of the message. You can specify text, HTML, or both. +// If you use both, then the message should display correctly in the widest +// variety of email clients. +type Body struct { + _ struct{} `type:"structure"` + + // The content of the message, in HTML format. Use this for email clients that + // can process HTML. You can include clickable links, formatted text, and much + // more in an HTML message. + Html *Content `type:"structure"` + + // The content of the message, in text format. Use this for text-based email + // clients, or clients on high-latency networks (such as mobile devices). + Text *Content `type:"structure"` +} + +// String returns the string representation +func (s Body) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Body) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action rejects the received email by +// returning a bounce response to the sender and, optionally, publishes a notification +// to Amazon Simple Notification Service (Amazon SNS). +// +// For information about sending a bounce message in response to a received +// email, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-bounce.html). +type BounceAction struct { + _ struct{} `type:"structure"` + + // Human-readable text to include in the bounce message. + Message *string `type:"string" required:"true"` + + // The email address of the sender of the bounced email. This is the address + // from which the bounce message will be sent. + Sender *string `type:"string" required:"true"` + + // The SMTP reply code, as defined by RFC 5321 (https://tools.ietf.org/html/rfc5321). + SmtpReplyCode *string `type:"string" required:"true"` + + // The SMTP enhanced status code, as defined by RFC 3463 (https://tools.ietf.org/html/rfc3463). + StatusCode *string `type:"string"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // bounce action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s BounceAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BounceAction) GoString() string { + return s.String() +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. 
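+//
+// A hedged construction sketch using the field names defined on this structure.
+// Per the field documentation below, either BounceType or RecipientDsnFields must
+// be provided; the Action and Status field names on RecipientDsnFields are
+// assumptions not shown in this excerpt:
+//
+//    info := &ses.BouncedRecipientInfo{
+//        Recipient: aws.String("recipient@example.com"),
+//        RecipientDsnFields: &ses.RecipientDsnFields{
+//            Action: aws.String("failed"),
+//            Status: aws.String("5.1.1"),
+//        },
+//    }
+//    _ = info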
+// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type BouncedRecipientInfo struct { + _ struct{} `type:"structure"` + + // The reason for the bounce. You must provide either this parameter or RecipientDsnFields. + BounceType *string `type:"string" enum:"BounceType"` + + // The email address of the recipient of the bounced email. + Recipient *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to receive email for the recipient of the bounced email. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + RecipientArn *string `type:"string"` + + // Recipient-related DSN fields, most of which would normally be filled in automatically + // when provided with a BounceType. You must provide either this parameter or + // BounceType. + RecipientDsnFields *RecipientDsnFields `type:"structure"` +} + +// String returns the string representation +func (s BouncedRecipientInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BouncedRecipientInfo) GoString() string { + return s.String() +} + +type CloneReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to clone. + OriginalRuleSetName *string `type:"string" required:"true"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetInput) GoString() string { + return s.String() +} + +type CloneReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CloneReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloneReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents textual data, plus an optional character set specification. +// +// By default, the text must be 7-bit ASCII, due to the constraints of the +// SMTP protocol. If the text must contain any other characters, then you must +// also specify a character set. Examples include UTF-8, ISO-8859-1, and Shift_JIS. +type Content struct { + _ struct{} `type:"structure"` + + // The character set of the content. + Charset *string `type:"string"` + + // The textual data of the content. 
+ Data *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Content) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Content) GoString() string { + return s.String() +} + +type CreateReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // A data structure that describes the IP address filter to create, which consists + // of a name, an IP address range, and whether to allow or block mail from it. + Filter *ReceiptFilter `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterInput) GoString() string { + return s.String() +} + +type CreateReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptFilterOutput) GoString() string { + return s.String() +} + +type CreateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of an existing rule after which the new rule will be placed. If + // this parameter is null, the new rule will be inserted at the beginning of + // the rule list. + After *string `type:"string"` + + // A data structure that contains the specified rule's name, actions, recipients, + // domains, enabled status, scan status, and TLS policy. + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the rule set to which to add the rule. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleInput) GoString() string { + return s.String() +} + +type CreateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleOutput) GoString() string { + return s.String() +} + +type CreateReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the rule set to create. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetInput) GoString() string { + return s.String() +} + +type CreateReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an identity from the +// list of identities for the AWS Account. 
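+//
+// A hedged usage sketch (svc is an assumed *ses.SES client; the DeleteIdentity
+// method is assumed to be defined earlier in this file):
+//
+//    _, err := svc.DeleteIdentity(&ses.DeleteIdentityInput{
+//        Identity: aws.String("no-longer-used@example.com"),
+//    })
+//    if err != nil {
+//        // handle the error
+//    }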
+type DeleteIdentityInput struct { + _ struct{} `type:"structure"` + + // The identity to be removed from the list of identities for the AWS Account. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type DeleteIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an authorization policy +// applying to an identity. +// +// This request succeeds regardless of whether the specified policy exists. +type DeleteIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy that you want to delete. + // You can specify the identity by using its name or by using its Amazon Resource + // Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The name of the policy to be deleted. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type DeleteIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteIdentityPolicyOutput) GoString() string { + return s.String() +} + +type DeleteReceiptFilterInput struct { + _ struct{} `type:"structure"` + + // The name of the IP address filter to delete. + FilterName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptFilterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterInput) GoString() string { + return s.String() +} + +type DeleteReceiptFilterOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptFilterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptFilterOutput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule to delete. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to delete. 
+ RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleInput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleOutput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to delete. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DeleteReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to delete an address from the +// list of verified email addresses. +type DeleteVerifiedEmailAddressInput struct { + _ struct{} `type:"structure"` + + // An email address to be removed from the list of verified addresses. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressInput) GoString() string { + return s.String() +} + +type DeleteVerifiedEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteVerifiedEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteVerifiedEmailAddressOutput) GoString() string { + return s.String() +} + +type DescribeActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DescribeActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the currently active receipt rule set. The metadata consists + // of the rule set name and a timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // The receipt rules that belong to the active rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule. 
+ RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleInput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleOutput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the specified receipt rule's name, actions, + // recipients, domains, enabled status, scan status, and Transport Layer Security + // (TLS) policy. + Rule *ReceiptRule `type:"structure"` +} + +// String returns the string representation +func (s DescribeReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleOutput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to describe. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetInput) GoString() string { + return s.String() +} + +type DescribeReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` + + // The metadata for the receipt rule set, which consists of the rule set name + // and the timestamp of when the rule set was created. + Metadata *ReceiptRuleSetMetadata `type:"structure"` + + // A list of the receipt rules that belong to the specified receipt rule set. + Rules []*ReceiptRule `type:"list"` +} + +// String returns the string representation +func (s DescribeReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents the destination of the message, consisting of To:, CC:, and BCC: +// fields. +// +// By default, the string must be 7-bit ASCII. If the text must contain any +// other characters, then you must use MIME encoded-word syntax (RFC 2047) instead +// of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. +// For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). +type Destination struct { + _ struct{} `type:"structure"` + + // The BCC: field(s) of the message. + BccAddresses []*string `type:"list"` + + // The CC: field(s) of the message. + CcAddresses []*string `type:"list"` + + // The To: field(s) of the message. + ToAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Additional X-headers to include in the Delivery Status Notification (DSN) +// when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type ExtensionField struct { + _ struct{} `type:"structure"` + + // The name of the header to add. 
Must be between 1 and 50 characters, inclusive, + // and consist of alphanumeric (a-z, A-Z, 0-9) characters and dashes only. + Name *string `type:"string" required:"true"` + + // The value of the header to add. Must be less than 2048 characters, and must + // not contain newline characters ("\r" or "\n"). + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ExtensionField) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ExtensionField) GoString() string { + return s.String() +} + +// Given a list of verified identities, describes their DKIM attributes. The +// DKIM attributes of an email address identity includes whether DKIM signing +// is individually enabled or disabled for that address. The DKIM attributes +// of a domain name identity includes whether DKIM signing is enabled, as well +// as the DNS records (tokens) that must remain published in the domain name's +// DNS. +type GetIdentityDkimAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more verified identities - email addresses, domains, or + // both. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesInput) GoString() string { + return s.String() +} + +// Represents a list of all the DKIM attributes for the specified identity. +type GetIdentityDkimAttributesOutput struct { + _ struct{} `type:"structure"` + + // The DKIM attributes for an email address or a domain. + DkimAttributes map[string]*IdentityDkimAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityDkimAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityDkimAttributesOutput) GoString() string { + return s.String() +} + +type GetIdentityNotificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. You can specify an identity by using its + // name or by using its Amazon Resource Name (ARN). Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesInput) GoString() string { + return s.String() +} + +// Describes whether an identity has Amazon Simple Notification Service (Amazon +// SNS) topics set for bounce, complaint, and/or delivery notifications, and +// specifies whether feedback forwarding is enabled for bounce and complaint +// notifications. +type GetIdentityNotificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identity to IdentityNotificationAttributes. 
+ NotificationAttributes map[string]*IdentityNotificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityNotificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityNotificationAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to retrieve the text of a list +// of authorization policies applying to an identity. +type GetIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity for which the policies will be retrieved. You can specify an + // identity by using its name or by using its Amazon Resource Name (ARN). Examples: + // user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // A list of the names of policies to be retrieved. You can retrieve a maximum + // of 20 policies at a time. If you do not know the names of the policies that + // are attached to the identity, you can use ListIdentityPolicies. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Represents a map of policy names to policies returned from a successful GetIdentityPolicies +// request. +type GetIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A map of policy names to policies. + Policies map[string]*string `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityPoliciesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to provide the verification +// attributes for a list of identities. +type GetIdentityVerificationAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesInput) GoString() string { + return s.String() +} + +// Represents the verification attributes for a list of identities. +type GetIdentityVerificationAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of Identities to IdentityVerificationAttributes objects. 
+ VerificationAttributes map[string]*IdentityVerificationAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityVerificationAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityVerificationAttributesOutput) GoString() string { + return s.String() +} + +type GetSendQuotaInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendQuotaInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaInput) GoString() string { + return s.String() +} + +// Represents the user's current activity limits returned from a successful +// GetSendQuota request. +type GetSendQuotaOutput struct { + _ struct{} `type:"structure"` + + // The maximum number of emails the user is allowed to send in a 24-hour interval. + // A value of -1 signifies an unlimited quota. + Max24HourSend *float64 `type:"double"` + + // The maximum number of emails that Amazon SES can accept from the user's account + // per second. + // + // The rate at which Amazon SES accepts the user's messages might be less than + // the maximum send rate. + MaxSendRate *float64 `type:"double"` + + // The number of emails sent during the previous 24 hours. + SentLast24Hours *float64 `type:"double"` +} + +// String returns the string representation +func (s GetSendQuotaOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendQuotaOutput) GoString() string { + return s.String() +} + +type GetSendStatisticsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetSendStatisticsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsInput) GoString() string { + return s.String() +} + +// Represents a list of SendDataPoint items returned from a successful GetSendStatistics +// request. This list contains aggregated data from the previous two weeks of +// sending activity. +type GetSendStatisticsOutput struct { + _ struct{} `type:"structure"` + + // A list of data points, each of which represents 15 minutes of activity. + SendDataPoints []*SendDataPoint `type:"list"` +} + +// String returns the string representation +func (s GetSendStatisticsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSendStatisticsOutput) GoString() string { + return s.String() +} + +// Represents the DKIM attributes of a verified email address or a domain. +type IdentityDkimAttributes struct { + _ struct{} `type:"structure"` + + // True if DKIM signing is enabled for email sent from the identity; false otherwise. + DkimEnabled *bool `type:"boolean" required:"true"` + + // A set of character strings that represent the domain's identity. Using these + // tokens, you will need to create DNS CNAME records that point to DKIM public + // keys hosted by Amazon SES. Amazon Web Services will eventually detect that + // you have updated your DNS records; this detection process may take up to + // 72 hours. Upon successful detection, Amazon SES will be able to DKIM-sign + // email originating from that domain. (This only applies to domain identities, + // not email address identities.) 
+ // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). + DkimTokens []*string `type:"list"` + + // Describes whether Amazon SES has successfully verified the DKIM DNS records + // (tokens) published in the domain name's DNS. (This only applies to domain + // identities, not email address identities.) + DkimVerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` +} + +// String returns the string representation +func (s IdentityDkimAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityDkimAttributes) GoString() string { + return s.String() +} + +// Represents the notification attributes of an identity, including whether +// an identity has Amazon Simple Notification Service (Amazon SNS) topics set +// for bounce, complaint, and/or delivery notifications, and whether feedback +// forwarding is enabled for bounce and complaint notifications. +type IdentityNotificationAttributes struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish bounce notifications. + BounceTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish complaint notifications. + ComplaintTopic *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic where Amazon SES will + // publish delivery notifications. + DeliveryTopic *string `type:"string" required:"true"` + + // Describes whether Amazon SES will forward bounce and complaint notifications + // as email. true indicates that Amazon SES will forward bounce and complaint + // notifications as email, while false indicates that bounce and complaint notifications + // will be published only to the specified bounce and complaint Amazon SNS topics. + ForwardingEnabled *bool `type:"boolean" required:"true"` +} + +// String returns the string representation +func (s IdentityNotificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityNotificationAttributes) GoString() string { + return s.String() +} + +// Represents the verification attributes of a single identity. +type IdentityVerificationAttributes struct { + _ struct{} `type:"structure"` + + // The verification status of the identity: "Pending", "Success", "Failed", + // or "TemporaryFailure". + VerificationStatus *string `type:"string" required:"true" enum:"VerificationStatus"` + + // The verification token for a domain identity. Null for email address identities. + VerificationToken *string `type:"string"` +} + +// String returns the string representation +func (s IdentityVerificationAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityVerificationAttributes) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls an AWS Lambda function +// and, optionally, publishes a notification to Amazon Simple Notification Service +// (Amazon SNS). +// +// To enable Amazon SES to call your AWS Lambda function or to publish to an +// Amazon SNS topic of another account, Amazon SES must have permission to access +// those resources. 
For information about giving permissions, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// For information about using AWS Lambda actions in receipt rules, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-lambda.html). +type LambdaAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the AWS Lambda function. An example of + // an AWS Lambda function ARN is arn:aws:lambda:us-west-2:account-id:function:MyFunction. + // For more information about AWS Lambda, see the AWS Lambda Developer Guide + // (http://docs.aws.amazon.com/lambda/latest/dg/welcome.html). + FunctionArn *string `type:"string" required:"true"` + + // The invocation type of the AWS Lambda function. An invocation type of RequestResponse + // means that the execution of the function will immediately result in a response, + // and a value of Event means that the function will be invoked asynchronously. + // The default value is Event. For information about AWS Lambda invocation types, + // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). + // + // There is a 30-second timeout on RequestResponse invocations. You should + // use Event invocation in most cases. Use RequestResponse only when you want + // to make a mail flow decision, such as whether to stop the receipt rule or + // the receipt rule set. + InvocationType *string `type:"string" enum:"InvocationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // Lambda action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s LambdaAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaAction) GoString() string { + return s.String() +} + +// Represents a request instructing the service to list all identities for the +// AWS Account. +type ListIdentitiesInput struct { + _ struct{} `type:"structure"` + + // The type of the identities to list. Possible values are "EmailAddress" and + // "Domain". If this parameter is omitted, then all identities will be listed. + IdentityType *string `type:"string" enum:"IdentityType"` + + // The maximum number of identities per page. Possible values are 1-1000 inclusive. + MaxItems *int64 `type:"integer"` + + // The token to use for pagination. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesInput) GoString() string { + return s.String() +} + +// Represents a list of all verified identities for the AWS Account. +type ListIdentitiesOutput struct { + _ struct{} `type:"structure"` + + // A list of identities. + Identities []*string `type:"list" required:"true"` + + // The token used for pagination. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIdentitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentitiesOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to list all authorization policies, +// by name, applying to an identity. +type ListIdentityPoliciesInput struct { + _ struct{} `type:"structure"` + + // The identity that is associated with the policy for which the policies will + // be listed. You can specify an identity by using its name or by using its + // Amazon Resource Name (ARN). Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesInput) GoString() string { + return s.String() +} + +// Represents a list of policy names returned from a successful ListIdentityPolicies +// request. +type ListIdentityPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of names of policies that apply to the specified identity. + PolicyNames []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListIdentityPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIdentityPoliciesOutput) GoString() string { + return s.String() +} + +type ListReceiptFiltersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListReceiptFiltersInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersInput) GoString() string { + return s.String() +} + +type ListReceiptFiltersOutput struct { + _ struct{} `type:"structure"` + + // A list of IP address filter data structures, which each consist of a name, + // an IP address range, and whether to allow or block mail from it. + Filters []*ReceiptFilter `type:"list"` +} + +// String returns the string representation +func (s ListReceiptFiltersOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptFiltersOutput) GoString() string { + return s.String() +} + +type ListReceiptRuleSetsInput struct { + _ struct{} `type:"structure"` + + // A token returned from a previous call to ListReceiptRuleSets to indicate + // the position in the receipt rule set list. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsInput) GoString() string { + return s.String() +} + +type ListReceiptRuleSetsOutput struct { + _ struct{} `type:"structure"` + + // A token indicating that there are additional receipt rule sets available + // to be listed. Pass this token to successive calls of ListReceiptRuleSets + // to retrieve up to 100 receipt rule sets at a time. + NextToken *string `type:"string"` + + // The metadata for the currently active receipt rule set. 
The metadata consists + // of the rule set name and the timestamp of when the rule set was created. + RuleSets []*ReceiptRuleSetMetadata `type:"list"` +} + +// String returns the string representation +func (s ListReceiptRuleSetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListReceiptRuleSetsOutput) GoString() string { + return s.String() +} + +type ListVerifiedEmailAddressesInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesInput) GoString() string { + return s.String() +} + +// Represents a list of all the email addresses verified for the current user. +type ListVerifiedEmailAddressesOutput struct { + _ struct{} `type:"structure"` + + // A list of email addresses that have been verified. + VerifiedEmailAddresses []*string `type:"list"` +} + +// String returns the string representation +func (s ListVerifiedEmailAddressesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListVerifiedEmailAddressesOutput) GoString() string { + return s.String() +} + +// Represents the message to be sent, composed of a subject and a body. +type Message struct { + _ struct{} `type:"structure"` + + // The message body. + Body *Body `type:"structure" required:"true"` + + // The subject of the message: A short summary of the content, which will appear + // in the recipient's inbox. + Subject *Content `type:"structure" required:"true"` +} + +// String returns the string representation +func (s Message) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Message) GoString() string { + return s.String() +} + +// Message-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type MessageDsn struct { + _ struct{} `type:"structure"` + + // When the message was received by the reporting mail transfer agent (MTA), + // in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) date-time format. + ArrivalDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The reporting MTA that attempted to deliver the message, formatted as specified + // in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; mta-name). + // The default value is dns; inbound-smtp.[region].amazonaws.com. + ReportingMta *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MessageDsn) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MessageDsn) GoString() string { + return s.String() +} + +// Represents a request instructing the service to apply an authorization policy +// to an identity. +type PutIdentityPolicyInput struct { + _ struct{} `type:"structure"` + + // The identity to which the policy will apply. You can specify an identity + // by using its name or by using its Amazon Resource Name (ARN). 
Examples: user@example.com, + // example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + // + // To successfully call this API, you must own the identity. + Identity *string `type:"string" required:"true"` + + // The text of the policy in JSON format. The policy cannot exceed 4 KB. + // + // For information about the syntax of sending authorization policies, see + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-policies.html). + Policy *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // The policy name cannot exceed 64 characters and can only include alphanumeric + // characters, dashes, and underscores. + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutIdentityPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type PutIdentityPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutIdentityPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutIdentityPolicyOutput) GoString() string { + return s.String() +} + +// Represents the raw data of the message. +type RawMessage struct { + _ struct{} `type:"structure"` + + // The raw data of the message. The client must ensure that the message format + // complies with Internet email standards regarding email header fields, MIME + // types, MIME encoding, and base64 encoding (if necessary). + // + // The To:, CC:, and BCC: headers in the raw message can contain a group list. + // + // If you are using SendRawEmail with sending authorization, you can include + // X-headers in the raw message to specify the "Source," "From," and "Return-Path" + // addresses. For more information, see the documentation for SendRawEmail. + // + // Do not include these X-headers in the DKIM signature, because they are removed + // by Amazon SES before sending the email. For more information, go to the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + Data []byte `type:"blob" required:"true"` +} + +// String returns the string representation +func (s RawMessage) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RawMessage) GoString() string { + return s.String() +} + +// An action that Amazon SES can take when it receives an email on behalf of +// one or more email addresses or domains that you own. An instance of this +// data type can represent only one action. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +type ReceiptAction struct { + _ struct{} `type:"structure"` + + // Adds a header to the received email. + AddHeaderAction *AddHeaderAction `type:"structure"` + + // Rejects the received email by returning a bounce response to the sender and, + // optionally, publishes a notification to Amazon Simple Notification Service + // (Amazon SNS). 
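Putting the PutIdentityPolicy pieces together: the identity may be a name or an ARN that you own, the policy name is limited to 64 characters of letters, digits, dashes, and underscores, and the policy itself is a JSON document under 4 KB. A hedged sketch, assuming an already-configured *ses.SES client; the identity, policy name, account IDs, and policy statement below are placeholders rather than values from this patch:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// attachSendPolicy attaches an illustrative sending-authorization policy to a
// verified identity. All names, ARNs, and account IDs below are placeholders.
func attachSendPolicy(svc *ses.SES) error {
	policy := `{
  "Version": "2008-10-17",
  "Statement": [{
    "Sid": "AllowPartnerSend",
    "Effect": "Allow",
    "Principal": {"AWS": "arn:aws:iam::123456789012:root"},
    "Action": ["ses:SendEmail", "ses:SendRawEmail"],
    "Resource": "arn:aws:ses:us-east-1:123456789012:identity/example.com"
  }]
}`

	_, err := svc.PutIdentityPolicy(&ses.PutIdentityPolicyInput{
		Identity:   aws.String("example.com"),        // identity name or ARN that you own
		PolicyName: aws.String("allow-partner-send"), // <= 64 chars: letters, digits, dashes, underscores
		Policy:     aws.String(policy),               // JSON document, must stay under 4 KB
	})
	return err
}
```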
+ BounceAction *BounceAction `type:"structure"` + + // Calls an AWS Lambda function, and optionally, publishes a notification to + // Amazon SNS. + LambdaAction *LambdaAction `type:"structure"` + + // Saves the received message to an Amazon Simple Storage Service (Amazon S3) + // bucket and, optionally, publishes a notification to Amazon SNS. + S3Action *S3Action `type:"structure"` + + // Publishes the email content within a notification to Amazon SNS. + SNSAction *SNSAction `type:"structure"` + + // Terminates the evaluation of the receipt rule set and optionally publishes + // a notification to Amazon SNS. + StopAction *StopAction `type:"structure"` + + // Calls Amazon WorkMail and, optionally, publishes a notification to Amazon + // SNS. + WorkmailAction *WorkmailAction `type:"structure"` +} + +// String returns the string representation +func (s ReceiptAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptAction) GoString() string { + return s.String() +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptFilter struct { + _ struct{} `type:"structure"` + + // A structure that provides the IP addresses to block or allow, and whether + // to block or allow incoming mail from them. + IpFilter *ReceiptIpFilter `type:"structure" required:"true"` + + // The name of the IP address filter. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReceiptFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptFilter) GoString() string { + return s.String() +} + +// A receipt IP address filter enables you to specify whether to accept or reject +// mail originating from an IP address or range of IP addresses. +// +// For information about setting up IP address filters, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-ip-filters.html). +type ReceiptIpFilter struct { + _ struct{} `type:"structure"` + + // A single IP address or a range of IP addresses that you want to block or + // allow, specified in Classless Inter-Domain Routing (CIDR) notation. An example + // of a single email address is 10.0.0.1. An example of a range of IP addresses + // is 10.0.0.1/24. For more information about CIDR notation, see RFC 2317 (https://tools.ietf.org/html/rfc2317). + Cidr *string `type:"string" required:"true"` + + // Indicates whether to block or allow incoming mail from the specified IP addresses. 
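A ReceiptFilter is just a name plus a ReceiptIpFilter carrying a CIDR range and a Block/Allow policy, and it is installed with the CreateReceiptFilter operation defined earlier in this file. A small sketch, assuming a configured *ses.SES client; the filter name and CIDR range are placeholders:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// blockRange installs an IP address filter that rejects mail from one CIDR range.
func blockRange(svc *ses.SES) error {
	_, err := svc.CreateReceiptFilter(&ses.CreateReceiptFilterInput{
		Filter: &ses.ReceiptFilter{
			Name: aws.String("block-spammer"), // letters, digits, ., _, - only; under 64 chars
			IpFilter: &ses.ReceiptIpFilter{
				Cidr:   aws.String("10.0.0.0/24"),
				Policy: aws.String(ses.ReceiptFilterPolicyBlock),
			},
		},
	})
	return err
}
```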
+ Policy *string `type:"string" required:"true" enum:"ReceiptFilterPolicy"` +} + +// String returns the string representation +func (s ReceiptIpFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptIpFilter) GoString() string { + return s.String() +} + +// Receipt rules enable you to specify which actions Amazon SES should take +// when it receives mail on behalf of one or more email addresses or domains +// that you own. +// +// Each receipt rule defines a set of email addresses or domains to which it +// applies. If the email addresses or domains match at least one recipient address +// of the message, Amazon SES executes all of the receipt rule's actions on +// the message. +// +// For information about setting up receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html). +type ReceiptRule struct { + _ struct{} `type:"structure"` + + // An ordered list of actions to perform on messages that match at least one + // of the recipient email addresses or domains specified in the receipt rule. + Actions []*ReceiptAction `type:"list"` + + // If true, the receipt rule is active. The default value is true. + Enabled *bool `type:"boolean"` + + // The name of the receipt rule. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. + Name *string `type:"string" required:"true"` + + // The recipient domains and email addresses to which the receipt rule applies. + // If this field is not specified, this rule will match all recipients under + // all verified domains. + Recipients []*string `type:"list"` + + // If true, then messages to which this receipt rule applies are scanned for + // spam and viruses. The default value is true. + ScanEnabled *bool `type:"boolean"` + + // Specifies whether Amazon SES should require that incoming email is delivered + // over a connection encrypted with Transport Layer Security (TLS). If this + // parameter is set to Require, Amazon SES will bounce emails that are not received + // over TLS. The default is Optional. + TlsPolicy *string `type:"string" enum:"TlsPolicy"` +} + +// String returns the string representation +func (s ReceiptRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRule) GoString() string { + return s.String() +} + +// Information about a receipt rule set. +// +// A receipt rule set is a collection of rules that specify what Amazon SES +// should do with mail it receives on behalf of your account's verified domains. +// +// For information about setting up receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rule-set.html). +type ReceiptRuleSetMetadata struct { + _ struct{} `type:"structure"` + + // The date and time the receipt rule set was created. + CreatedTimestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the receipt rule set. The name must: + // + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). Start and end with a letter or number. Contain less than + // 64 characters. 
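A ReceiptRule bundles the matching recipients with an ordered list of actions and is attached to a rule set with the CreateReceiptRule operation defined earlier in this file. A sketch mirroring the aws_ses_receipt_rule documentation later in this patch, assuming a configured *ses.SES client and an existing rule set; the rule set, recipient, and bucket names are placeholders:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// storeToS3 adds a rule that scans matching mail and saves it to an S3 bucket.
func storeToS3(svc *ses.SES) error {
	_, err := svc.CreateReceiptRule(&ses.CreateReceiptRuleInput{
		RuleSetName: aws.String("default-rule-set"),
		Rule: &ses.ReceiptRule{
			Name:        aws.String("store"),
			Enabled:     aws.Bool(true),
			ScanEnabled: aws.Bool(true),
			TlsPolicy:   aws.String(ses.TlsPolicyOptional),
			Recipients:  []*string{aws.String("karen@example.com")},
			Actions: []*ses.ReceiptAction{
				{
					// Each ReceiptAction carries exactly one concrete action.
					S3Action: &ses.S3Action{
						BucketName: aws.String("emails"),
					},
				},
			},
		},
	})
	return err
}
```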
+ Name *string `type:"string"` +} + +// String returns the string representation +func (s ReceiptRuleSetMetadata) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReceiptRuleSetMetadata) GoString() string { + return s.String() +} + +// Recipient-related information to include in the Delivery Status Notification +// (DSN) when an email that Amazon SES receives on your behalf bounces. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +type RecipientDsnFields struct { + _ struct{} `type:"structure"` + + // The action performed by the reporting mail transfer agent (MTA) as a result + // of its attempt to deliver the message to the recipient address. This is required + // by RFC 3464 (https://tools.ietf.org/html/rfc3464). + Action *string `type:"string" required:"true" enum:"DsnAction"` + + // An extended explanation of what went wrong; this is usually an SMTP response. + // See RFC 3463 (https://tools.ietf.org/html/rfc3463) for the correct formatting + // of this parameter. + DiagnosticCode *string `type:"string"` + + // Additional X-headers to include in the DSN. + ExtensionFields []*ExtensionField `type:"list"` + + // The email address to which the message was ultimately delivered. This corresponds + // to the Final-Recipient in the DSN. If not specified, FinalRecipient will + // be set to the Recipient specified in the BouncedRecipientInfo structure. + // Either FinalRecipient or the recipient in BouncedRecipientInfo must be a + // recipient of the original bounced message. + // + // Do not prepend the FinalRecipient email address with rfc 822;, as described + // in RFC 3798 (https://tools.ietf.org/html/rfc3798). + FinalRecipient *string `type:"string"` + + // The time the final delivery attempt was made, in RFC 822 (https://www.ietf.org/rfc/rfc0822.txt) + // date-time format. + LastAttemptDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The MTA to which the remote MTA attempted to deliver the message, formatted + // as specified in RFC 3464 (https://tools.ietf.org/html/rfc3464) (mta-name-type; + // mta-name). This parameter typically applies only to propagating synchronous + // bounces. + RemoteMta *string `type:"string"` + + // The status code that indicates what went wrong. This is required by RFC 3464 + // (https://tools.ietf.org/html/rfc3464). + Status *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RecipientDsnFields) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RecipientDsnFields) GoString() string { + return s.String() +} + +type ReorderReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // A list of the specified receipt rule set's receipt rules in the order that + // you want to put them. + RuleNames []*string `type:"list" required:"true"` + + // The name of the receipt rule set to reorder. 
+ RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetInput) GoString() string { + return s.String() +} + +type ReorderReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ReorderReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReorderReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action saves the received message to +// an Amazon Simple Storage Service (Amazon S3) bucket and, optionally, publishes +// a notification to Amazon Simple Notification Service (Amazon SNS). +// +// To enable Amazon SES to write emails to your Amazon S3 bucket, use an AWS +// KMS key to encrypt your emails, or publish to an Amazon SNS topic of another +// account, Amazon SES must have permission to access those resources. For information +// about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// When you save your emails to an Amazon S3 bucket, the maximum email size +// (including headers) is 30 MB. Emails larger than that will bounce. For information +// about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). +type S3Action struct { + _ struct{} `type:"structure"` + + // The name of the Amazon S3 bucket to which to save the received email. + BucketName *string `type:"string" required:"true"` + + // The customer master key that Amazon SES should use to encrypt your emails + // before saving them to the Amazon S3 bucket. You can use the default master + // key or a custom master key you created in AWS KMS as follows: + // + // To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. + // For example, if your AWS account ID is 123456789012 and you want to use the + // default master key in the US West (Oregon) region, the ARN of the default + // master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If + // you use the default master key, you don't need to perform any extra steps + // to give Amazon SES permission to use the key. To use a custom master key + // you created in AWS KMS, provide the ARN of the master key and ensure that + // you add a statement to your key's policy to give Amazon SES permission to + // use it. For more information about giving permissions, see the Amazon SES + // Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // For more information about key policies, see the AWS KMS Developer Guide + // (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If + // you do not specify a master key, Amazon SES will not encrypt your emails. + // + // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client + // before the mail is submitted to Amazon S3 for storage. It is not encrypted + // using Amazon S3 server-side encryption. 
This means that you must use the + // Amazon S3 encryption client to decrypt the email after retrieving it from + // Amazon S3, as the service has no access to use your AWS KMS keys for decryption. + // This encryption client is currently available with the AWS Java SDK (https://aws.amazon.com/sdk-for-java/) + // and AWS Ruby SDK (https://aws.amazon.com/sdk-for-ruby/) only. For more information + // about client-side encryption using AWS KMS master keys, see the Amazon S3 + // Developer Guide (http://alpha-docs-aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). + KmsKeyArn *string `type:"string"` + + // The key prefix of the Amazon S3 bucket. The key prefix is similar to a directory + // name that enables you to store similar data under the same directory in a + // bucket. + ObjectKeyPrefix *string `type:"string"` + + // The ARN of the Amazon SNS topic to notify when the message is saved to the + // Amazon S3 bucket. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s S3Action) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s S3Action) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action publishes a notification to +// Amazon Simple Notification Service (Amazon SNS). This action includes a complete +// copy of the email content in the Amazon SNS notifications. Amazon SNS notifications +// for all other actions simply provide information about the email. They do +// not include the email content itself. +// +// If you own the Amazon SNS topic, you don't need to do anything to give Amazon +// SES permission to publish emails to it. However, if you don't own the Amazon +// SNS topic, you need to attach a policy to the topic to give Amazon SES permissions +// to access it. For information about giving permissions, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). +// +// You can only publish emails that are 150 KB or less (including the header) +// to Amazon SNS. Larger emails will bounce. If you anticipate emails larger +// than 150 KB, use the S3 action instead. For information about using a receipt +// rule to publish an Amazon SNS notification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). +type SNSAction struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example + // of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SNSAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SNSAction) GoString() string { + return s.String() +} + +// Request object for sending a simple/complex bounce. It contains all of the +// information needed to generate a basic DSN or a fully-customized DSN. 
+type SendBounceInput struct { + _ struct{} `type:"structure"` + + // The address to use in the "From" header of the bounce message. This must + // be an identity that you have verified with Amazon SES. + BounceSender *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the address in the "From" header of the bounce. For more information + // about sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + BounceSenderArn *string `type:"string"` + + // A list of recipients of the bounced message, including the information required + // to create the Delivery Status Notifications (DSNs) for the recipients. You + // must specify at least one BouncedRecipientInfo in the list. + BouncedRecipientInfoList []*BouncedRecipientInfo `type:"list" required:"true"` + + // Human-readable text for the bounce message to explain the failure. If not + // specified, the text will be auto-generated based on the bounced recipient + // information. + Explanation *string `type:"string"` + + // Message-related DSN fields. If not specified, Amazon SES will choose the + // values. + MessageDsn *MessageDsn `type:"structure"` + + // The message ID of the message to be bounced. + OriginalMessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendBounceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceInput) GoString() string { + return s.String() +} + +type SendBounceOutput struct { + _ struct{} `type:"structure"` + + // The message ID of the bounce message. + MessageId *string `type:"string"` +} + +// String returns the string representation +func (s SendBounceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendBounceOutput) GoString() string { + return s.String() +} + +// Represents sending statistics data. Each SendDataPoint contains statistics +// for a 15-minute period of sending activity. +type SendDataPoint struct { + _ struct{} `type:"structure"` + + // Number of emails that have bounced. + Bounces *int64 `type:"long"` + + // Number of unwanted emails that were rejected by recipients. + Complaints *int64 `type:"long"` + + // Number of emails that have been enqueued for sending. + DeliveryAttempts *int64 `type:"long"` + + // Number of emails rejected by Amazon SES. + Rejects *int64 `type:"long"` + + // Time of the data point. + Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SendDataPoint) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendDataPoint) GoString() string { + return s.String() +} + +// Represents a request instructing the service to send a single email message. +// +// This datatype can be used in application code to compose a message consisting +// of source, destination, message, reply-to, and return-path parts. This object +// can then be sent using the SendEmail action. +type SendEmailInput struct { + _ struct{} `type:"structure"` + + // The destination for this email, composed of To:, CC:, and BCC: fields. + Destination *Destination `type:"structure" required:"true"` + + // The message to be sent. 
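SendBounceInput ties a verified bounce sender and the original message ID to a list of bounced recipients, and SES builds the DSN from those pieces. A sketch, assuming a configured *ses.SES client and the BouncedRecipientInfo type defined earlier in this file (its field names here are assumed from the SES API and are not shown in this excerpt); the addresses are placeholders:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// bounceUnknownRecipient reports a "no such user" bounce for a received message.
func bounceUnknownRecipient(svc *ses.SES, originalMessageID string) error {
	_, err := svc.SendBounce(&ses.SendBounceInput{
		BounceSender:      aws.String("mailer-daemon@example.com"), // must be a verified identity
		OriginalMessageId: aws.String(originalMessageID),
		BouncedRecipientInfoList: []*ses.BouncedRecipientInfo{
			{
				Recipient:  aws.String("nobody@example.com"),
				BounceType: aws.String(ses.BounceTypeDoesNotExist),
			},
		},
	})
	return err
}
```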
+ Message *Message `type:"structure" required:"true"` + + // The reply-to email address(es) for the message. If the recipient replies + // to the message, each reply-to address will receive the reply. + ReplyToAddresses []*string `type:"list"` + + // The email address to which bounces and complaints are to be forwarded when + // feedback forwarding is enabled. If the message cannot be delivered to the + // recipient, then an error message will be returned from the recipient's ISP; + // this message will then be forwarded to the email address specified by the + // ReturnPath parameter. The ReturnPath parameter is never overwritten. This + // email address must be either individually verified with Amazon SES, or from + // a domain that has been verified with Amazon SES. + ReturnPath *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. + // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + ReturnPathArn *string `type:"string"` + + // The email address that is sending the email. This email address must be either + // individually verified with Amazon SES, or from a domain that has been verified + // with Amazon SES. For information about verifying identities, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). + // + // If you are sending on behalf of another user and have been permitted to + // do so by a sending authorization policy, then you must also specify the SourceArn + // parameter. For more information about sending authorization, see the Amazon + // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + // + // In all cases, the email address must be 7-bit ASCII. If the text must contain + // any other characters, then you must use MIME encoded-word syntax (RFC 2047) + // instead of a literal string. MIME encoded-word syntax uses the following + // form: =?charset?encoding?encoded-text?=. For more information, see RFC 2047 + // (http://tools.ietf.org/html/rfc2047). + Source *string `type:"string" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. 
+ // + // For more information about sending authorization, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). + SourceArn *string `type:"string"` +} + +// String returns the string representation +func (s SendEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailInput) GoString() string { + return s.String() +} + +// Represents a unique message ID returned from a successful SendEmail request. +type SendEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendEmail action. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendEmailOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to send a raw email message. +// +// This datatype can be used in application code to compose a message consisting +// of source, destination, and raw message text. This object can then be sent +// using the SendRawEmail action. +type SendRawEmailInput struct { + _ struct{} `type:"structure"` + + // A list of destinations for the message, consisting of To:, CC:, and BCC: + // addresses. + Destinations []*string `type:"list"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to specify a particular "From" address in the header of the raw email. + // + // Instead of using this parameter, you can use the X-header X-SES-FROM-ARN + // in the raw message of the email. If you use both the FromArn parameter and + // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + FromArn *string `type:"string"` + + // The raw text of the message. The client is responsible for ensuring the following: + // + // Message must contain a header and a body, separated by a blank line. All + // required header fields must be present. Each part of a multipart MIME message + // must be formatted properly. MIME content types must be among those supported + // by Amazon SES. For more information, go to the Amazon SES Developer Guide + // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). + // Content must be base64-encoded, if MIME requires it. + RawMessage *RawMessage `type:"structure" required:"true"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to use the email address specified in the ReturnPath parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to use feedback@example.com, + // then you would specify the ReturnPathArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the ReturnPath to be feedback@example.com. 
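For a structured send, SendEmailInput combines a verified Source, a Destination, and a Message whose Subject and Body use the Content and Body types defined earlier in this file. A minimal text-only sketch, assuming a configured *ses.SES client; both addresses are placeholders and the sender must be a verified identity:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// sendPlainText sends a minimal text-only message and returns its message ID.
func sendPlainText(svc *ses.SES) (*ses.SendEmailOutput, error) {
	return svc.SendEmail(&ses.SendEmailInput{
		Source: aws.String("sender@example.com"), // must be a verified identity
		Destination: &ses.Destination{
			ToAddresses: []*string{aws.String("recipient@example.com")},
		},
		Message: &ses.Message{
			Subject: &ses.Content{Data: aws.String("Hello from SES")},
			Body: &ses.Body{
				Text: &ses.Content{Data: aws.String("This is the message body.")},
			},
		},
	})
}
```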
+ // + // Instead of using this parameter, you can use the X-header X-SES-RETURN-PATH-ARN + // in the raw message of the email. If you use both the ReturnPathArn parameter + // and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + ReturnPathArn *string `type:"string"` + + // The identity's email address. If you do not provide a value for this parameter, + // you must specify a "From" address in the raw text of the message. (You can + // also specify both.) + // + // By default, the string must be 7-bit ASCII. If the text must contain any + // other characters, then you must use MIME encoded-word syntax (RFC 2047) instead + // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. + // For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). + // + // If you specify the Source parameter and have feedback forwarding enabled, + // then bounces and complaints will be sent to this email address. This takes + // precedence over any Return-Path header that you might include in the raw + // text of the message. + Source *string `type:"string"` + + // This parameter is used only for sending authorization. It is the ARN of the + // identity that is associated with the sending authorization policy that permits + // you to send for the email address specified in the Source parameter. + // + // For example, if the owner of example.com (which has ARN arn:aws:ses:us-east-1:123456789012:identity/example.com) + // attaches a policy to it that authorizes you to send from user@example.com, + // then you would specify the SourceArn to be arn:aws:ses:us-east-1:123456789012:identity/example.com, + // and the Source to be user@example.com. + // + // Instead of using this parameter, you can use the X-header X-SES-SOURCE-ARN + // in the raw message of the email. If you use both the SourceArn parameter + // and the corresponding X-header, Amazon SES uses the value of the SourceArn + // parameter. + // + // For information about when to use this parameter, see the description of + // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). + SourceArn *string `type:"string"` +} + +// String returns the string representation +func (s SendRawEmailInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailInput) GoString() string { + return s.String() +} + +// Represents a unique message ID returned from a successful SendRawEmail request. +type SendRawEmailOutput struct { + _ struct{} `type:"structure"` + + // The unique message identifier returned from the SendRawEmail action. + MessageId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SendRawEmailOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SendRawEmailOutput) GoString() string { + return s.String() +} + +type SetActiveReceiptRuleSetInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule set to make active. Setting this value to null + // disables all email receiving. 
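SendRawEmail leaves MIME construction to the caller: the RawMessage data must contain the headers, a blank line, and the body, and the From header stands in for Source when Source is omitted. A sketch under those rules, assuming a configured *ses.SES client; the addresses and content are placeholders:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// sendRaw sends a hand-built message: headers, a blank line, then the body.
func sendRaw(svc *ses.SES) (*ses.SendRawEmailOutput, error) {
	raw := "From: sender@example.com\r\n" + // must be a verified identity
		"To: recipient@example.com\r\n" +
		"Subject: Hello from SES\r\n" +
		"MIME-Version: 1.0\r\n" +
		"Content-Type: text/plain; charset=UTF-8\r\n" +
		"\r\n" + // blank line separates headers from the body
		"This is the message body.\r\n"

	return svc.SendRawEmail(&ses.SendRawEmailInput{
		RawMessage: &ses.RawMessage{Data: []byte(raw)},
	})
}
```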
+ RuleSetName *string `type:"string"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetInput) GoString() string { + return s.String() +} + +type SetActiveReceiptRuleSetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetActiveReceiptRuleSetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetActiveReceiptRuleSetOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to enable or disable DKIM signing +// for an identity. +type SetIdentityDkimEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether DKIM signing is enabled for an identity. Set to true to enable + // DKIM signing for this identity; false to disable it. + DkimEnabled *bool `type:"boolean" required:"true"` + + // The identity for which DKIM signing should be enabled or disabled. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type SetIdentityDkimEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityDkimEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityDkimEnabledOutput) GoString() string { + return s.String() +} + +type SetIdentityFeedbackForwardingEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES will forward bounce and complaint notifications as + // email. true specifies that Amazon SES will forward bounce and complaint notifications + // as email, in addition to any Amazon SNS topic publishing otherwise specified. + // false specifies that Amazon SES will publish bounce and complaint notifications + // only through Amazon SNS. This value can only be set to false when Amazon + // SNS topics are set for both Bounce and Complaint notification types. + ForwardingEnabled *bool `type:"boolean" required:"true"` + + // The identity for which to set bounce and complaint notification forwarding. + // Examples: user@example.com, example.com. + Identity *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. 
+type SetIdentityFeedbackForwardingEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to set or clear an identity's notification topic. +type SetIdentityNotificationTopicInput struct { + _ struct{} `type:"structure"` + + // The identity for which the Amazon SNS topic will be set. You can specify + // an identity by using its name or by using its Amazon Resource Name (ARN). + // Examples: user@example.com, example.com, arn:aws:ses:us-east-1:123456789012:identity/example.com. + Identity *string `type:"string" required:"true"` + + // The type of notifications that will be published to the specified Amazon + // SNS topic. + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic. If the parameter + // is omitted from the request or a null value is passed, SnsTopic is cleared + // and publishing is disabled. + SnsTopic *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type SetIdentityNotificationTopicOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityNotificationTopicOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityNotificationTopicOutput) GoString() string { + return s.String() +} + +type SetReceiptRulePositionInput struct { + _ struct{} `type:"structure"` + + // The name of the receipt rule after which to place the specified receipt rule. + After *string `type:"string"` + + // The name of the receipt rule to reposition. + RuleName *string `type:"string" required:"true"` + + // The name of the receipt rule set that contains the receipt rule to reposition. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetReceiptRulePositionInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionInput) GoString() string { + return s.String() +} + +type SetReceiptRulePositionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetReceiptRulePositionOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetReceiptRulePositionOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action terminates the evaluation of +// the receipt rule set and, optionally, publishes a notification to Amazon +// Simple Notification Service (Amazon SNS). +// +// For information about setting a stop action in a receipt rule, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-stop.html). 
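Because feedback forwarding can only be switched off once both the Bounce and Complaint notification types publish to Amazon SNS topics, the two calls are usually made together. A sketch, assuming a configured *ses.SES client and the NotificationType constants defined at the end of this file; the identity and topic ARN are placeholders:

```
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// routeFeedbackToSNS points Bounce and Complaint notifications at an SNS topic,
// then disables email forwarding, which SES only permits once both topics are set.
func routeFeedbackToSNS(svc *ses.SES) error {
	identity := aws.String("example.com")
	topic := aws.String("arn:aws:sns:us-east-1:123456789012:ses-feedback")

	for _, nt := range []string{ses.NotificationTypeBounce, ses.NotificationTypeComplaint} {
		_, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
			Identity:         identity,
			NotificationType: aws.String(nt),
			SnsTopic:         topic,
		})
		if err != nil {
			return err
		}
	}

	_, err := svc.SetIdentityFeedbackForwardingEnabled(&ses.SetIdentityFeedbackForwardingEnabledInput{
		Identity:          identity,
		ForwardingEnabled: aws.Bool(false),
	})
	return err
}
```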
+type StopAction struct { + _ struct{} `type:"structure"` + + // The scope to which the Stop action applies. That is, what is being stopped. + Scope *string `type:"string" required:"true" enum:"StopScope"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // stop action is taken. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s StopAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StopAction) GoString() string { + return s.String() +} + +type UpdateReceiptRuleInput struct { + _ struct{} `type:"structure"` + + // A data structure that contains the updated receipt rule information. + Rule *ReceiptRule `type:"structure" required:"true"` + + // The name of the receipt rule set to which the receipt rule belongs. + RuleSetName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateReceiptRuleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleInput) GoString() string { + return s.String() +} + +type UpdateReceiptRuleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateReceiptRuleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateReceiptRuleOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin DKIM verification for +// a domain. +type VerifyDomainDkimInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to be verified for Easy DKIM signing. + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimInput) GoString() string { + return s.String() +} + +// Represents the DNS records that must be published in the domain name's DNS +// to complete DKIM setup. +type VerifyDomainDkimOutput struct { + _ struct{} `type:"structure"` + + // A set of character strings that represent the domain's identity. If the identity + // is an email address, the tokens represent the domain of that address. + // + // Using these tokens, you will need to create DNS CNAME records that point + // to DKIM public keys hosted by Amazon SES. Amazon Web Services will eventually + // detect that you have updated your DNS records; this detection process may + // take up to 72 hours. Upon successful detection, Amazon SES will be able to + // DKIM-sign emails originating from that domain. + // + // For more information about creating DNS records using DKIM tokens, go to + // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). 
+ DkimTokens []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainDkimOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainDkimOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin domain verification. +type VerifyDomainIdentityInput struct { + _ struct{} `type:"structure"` + + // The domain to be verified. + Domain *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityInput) GoString() string { + return s.String() +} + +// Represents a token used for domain ownership verification. +type VerifyDomainIdentityOutput struct { + _ struct{} `type:"structure"` + + // A TXT record that must be placed in the DNS settings for the domain, in order + // to complete domain verification. + VerificationToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyDomainIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyDomainIdentityOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin email address verification. +type VerifyEmailAddressInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailAddressInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressInput) GoString() string { + return s.String() +} + +type VerifyEmailAddressOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailAddressOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailAddressOutput) GoString() string { + return s.String() +} + +// Represents a request instructing the service to begin email address verification. +type VerifyEmailIdentityInput struct { + _ struct{} `type:"structure"` + + // The email address to be verified. + EmailAddress *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s VerifyEmailIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityInput) GoString() string { + return s.String() +} + +// An empty element. Receiving this element indicates that the request completed +// successfully. +type VerifyEmailIdentityOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s VerifyEmailIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VerifyEmailIdentityOutput) GoString() string { + return s.String() +} + +// When included in a receipt rule, this action calls Amazon WorkMail and, optionally, +// publishes a notification to Amazon Simple Notification Service (Amazon SNS). +// You will typically not use this action directly because Amazon WorkMail adds +// the rule automatically during its setup procedure. 
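VerifyDomainIdentity returns a TXT verification token and VerifyDomainDkim returns the DKIM tokens, and in both cases the caller must publish the corresponding DNS records and wait for detection. A sketch that prints the records to create, assuming a configured *ses.SES client; the record name patterns shown (an _amazonses TXT record and per-token _domainkey CNAMEs pointing at dkim.amazonses.com) are conventional SES values assumed here, not taken from this patch:

```
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// verifyDomain starts domain and Easy DKIM verification and prints the DNS
// records to publish. The record name formats below are assumptions.
func verifyDomain(svc *ses.SES, domain string) error {
	idOut, err := svc.VerifyDomainIdentity(&ses.VerifyDomainIdentityInput{
		Domain: aws.String(domain),
	})
	if err != nil {
		return err
	}
	fmt.Printf("TXT   _amazonses.%s -> %s\n", domain, aws.StringValue(idOut.VerificationToken))

	dkimOut, err := svc.VerifyDomainDkim(&ses.VerifyDomainDkimInput{
		Domain: aws.String(domain),
	})
	if err != nil {
		return err
	}
	for _, token := range dkimOut.DkimTokens {
		t := aws.StringValue(token)
		fmt.Printf("CNAME %s._domainkey.%s -> %s.dkim.amazonses.com\n", t, domain, t)
	}
	return nil
}
```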
+// +// For information using a receipt rule to call Amazon WorkMail, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-workmail.html). +type WorkmailAction struct { + _ struct{} `type:"structure"` + + // The ARN of the Amazon WorkMail organization. An example of an Amazon WorkMail + // organization ARN is arn:aws:workmail:us-west-2:123456789012:organization/m-68755160c4cb4e29a2b2f8fb58f359d7. + // For information about Amazon WorkMail organizations, see the Amazon WorkMail + // Administrator Guide (http://docs.aws.amazon.com/workmail/latest/adminguide/organizations_overview.html). + OrganizationArn *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify when the + // WorkMail action is called. An example of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. + // For more information about Amazon SNS topics, see the Amazon SNS Developer + // Guide (http://docs.aws.amazon.com/sns/latest/dg/CreateTopic.html). + TopicArn *string `type:"string"` +} + +// String returns the string representation +func (s WorkmailAction) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WorkmailAction) GoString() string { + return s.String() +} + +const ( + // @enum BounceType + BounceTypeDoesNotExist = "DoesNotExist" + // @enum BounceType + BounceTypeMessageTooLarge = "MessageTooLarge" + // @enum BounceType + BounceTypeExceededQuota = "ExceededQuota" + // @enum BounceType + BounceTypeContentRejected = "ContentRejected" + // @enum BounceType + BounceTypeUndefined = "Undefined" + // @enum BounceType + BounceTypeTemporaryFailure = "TemporaryFailure" +) + +const ( + // @enum DsnAction + DsnActionFailed = "failed" + // @enum DsnAction + DsnActionDelayed = "delayed" + // @enum DsnAction + DsnActionDelivered = "delivered" + // @enum DsnAction + DsnActionRelayed = "relayed" + // @enum DsnAction + DsnActionExpanded = "expanded" +) + +const ( + // @enum IdentityType + IdentityTypeEmailAddress = "EmailAddress" + // @enum IdentityType + IdentityTypeDomain = "Domain" +) + +const ( + // @enum InvocationType + InvocationTypeEvent = "Event" + // @enum InvocationType + InvocationTypeRequestResponse = "RequestResponse" +) + +const ( + // @enum NotificationType + NotificationTypeBounce = "Bounce" + // @enum NotificationType + NotificationTypeComplaint = "Complaint" + // @enum NotificationType + NotificationTypeDelivery = "Delivery" +) + +const ( + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyBlock = "Block" + // @enum ReceiptFilterPolicy + ReceiptFilterPolicyAllow = "Allow" +) + +const ( + // @enum StopScope + StopScopeRuleSet = "RuleSet" +) + +const ( + // @enum TlsPolicy + TlsPolicyRequire = "Require" + // @enum TlsPolicy + TlsPolicyOptional = "Optional" +) + +const ( + // @enum VerificationStatus + VerificationStatusPending = "Pending" + // @enum VerificationStatus + VerificationStatusSuccess = "Success" + // @enum VerificationStatus + VerificationStatusFailed = "Failed" + // @enum VerificationStatus + VerificationStatusTemporaryFailure = "TemporaryFailure" + // @enum VerificationStatus + VerificationStatusNotStarted = "NotStarted" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go new file mode 100644 index 000000000..8f721db6d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -0,0 +1,93 @@ +// THIS FILE IS 
AUTOMATICALLY GENERATED. DO NOT EDIT. + +package ses + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v4" +) + +// This is the API Reference for Amazon Simple Email Service (Amazon SES). This +// documentation is intended to be used in conjunction with the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). +// +// For a list of Amazon SES endpoints to use in service requests, see Regions +// and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) +// in the Amazon SES Developer Guide. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SES struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "email" + +// New creates a new instance of the SES client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SES client from just a session. +// svc := ses.New(mySession) +// +// // Create a SES client with additional configuration +// svc := ses.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SES { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SES { + svc := &SES{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "ses", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2010-12-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SES operation and runs any +// custom request initialization. +func (c *SES) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go new file mode 100644 index 000000000..8156c0fc0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package ses + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *SES) WaitUntilIdentityExists(input *GetIdentityVerificationAttributesInput) error { + waiterCfg := waiter.Config{ + Operation: "GetIdentityVerificationAttributes", + Delay: 3, + MaxAttempts: 20, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "VerificationAttributes.*.VerificationStatus", + Expected: "Success", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown new file mode 100644 index 000000000..49ce6dded --- /dev/null +++ b/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown @@ -0,0 +1,25 @@ +--- +layout: "aws" +page_title: "AWS: ses_active_receipt_rule_set" +sidebar_current: "docs-aws-resource-ses-active-receipt-rule-set" +description: |- + Provides a resource to designate the active SES receipt rule set +--- + +# aws\_ses\_active_receipt_rule_set + +Provides a resource to designate the active SES receipt rule set + +## Example Usage + +``` +resource "aws_ses_active_receipt_rule_set" "main" { + rule_set_name = "primary-rules" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `rule_set_name` - (Required) The name of the rule set diff --git a/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown new file mode 100644 index 000000000..94d665cc4 --- /dev/null +++ b/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown @@ -0,0 +1,29 @@ +--- +layout: "aws" +page_title: "AWS: ses_receipt_filter" +sidebar_current: "docs-aws-resource-ses-receipt-filter" +description: |- + Provides an SES receipt filter +--- + +# aws\_ses\_receipt_filter + +Provides an SES receipt filter resource + +## Example Usage + +``` +resource "aws_ses_receipt_filter" "filter" { + name = "block-spammer" + cidr = "10.10.10.10" + policy = "Block" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the filter +* `cidr` - (Required) The IP address or address range to filter, in CIDR notation +* `policy` - (Required) Block or Allow diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown new file mode 100644 index 000000000..5362d3737 --- /dev/null +++ b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown @@ -0,0 +1,100 @@ +--- +layout: "aws" +page_title: "AWS: ses_receipt_rule" +sidebar_current: "docs-aws-resource-ses-receipt-rule" +description: |- + Provides an SES receipt rule resource +--- + +# aws\_ses\_receipt_rule + +Provides an SES receipt rule resource + +## Example Usage + +``` +# Add a header to the email and store it in S3 +resource "aws_ses_receipt_rule" "store" { + name = "store" + rule_set_name = "default-rule-set" + recipients = ["karen@example.com"] + enabled = true + scan_enabled = true + + add_header_action { + header_name = "Custom-Header" + header_value = "Added by SES" + } + + s3_action { + bucket_name = "emails" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the rule +* `rule_set_name` - (Required) The name of the rule set +* `after` - (Optional) The name of 
the rule to place this rule after +* `enabled` - (Optional) If true, the rule will be enabled +* `recipients` - (Optional) A list of email addresses the rule applies to +* `scan_enabled` - (Optional) If true, incoming emails will be scanned for spam and viruses +* `tls_policy` - (Optional) Require or Optional +* `add_header_action` - (Optional) A list of Add Header Action blocks. Documented below. +* `bounce_action` - (Optional) A list of Bounce Action blocks. Documented below. +* `lambda_action` - (Optional) A list of Lambda Action blocks. Documented below. +* `s3_action` - (Optional) A list of S3 Action blocks. Documented below. +* `sns_action` - (Optional) A list of SNS Action blocks. Documented below. +* `stop_action` - (Optional) A list of Stop Action blocks. Documented below. +* `workmail_action` - (Optional) A list of WorkMail Action blocks. Documented below. + +Add header actions support the following: + +* `header_name` - (Required) The name of the header to add +* `header_value` - (Required) The value of the header to add +* `position` - (Required) The position of the action in the receipt rule + +Bounce actions support the following: + +* `message` - (Required) The message to send +* `sender` - (Required) The email address of the sender +* `smtp_reply_code` - (Required) The RFC 5321 SMTP reply code +* `status_code` - (Optional) The RFC 3463 SMTP enhanced status code +* `topic_arn` - (Optional) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule + +Lambda actions support the following: + +* `function_arn` - (Required) The ARN of the Lambda function to invoke +* `invocation_type` - (Optional) Event or RequestResponse +* `topic_arn` - (Optional) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule + +S3 actions support the following: + +* `bucket_name` - (Required) The name of the S3 bucket +* `kms_key_arn` - (Optional) The ARN of the KMS key +* `object_key_prefix` - (Optional) The key prefix of the S3 bucket +* `topic_arn` - (Optional) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule + +SNS actions support the following: + +* `topic_arn` - (Required) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule + +Stop actions support the following: + +* `scope` - (Required) The scope to apply +* `topic_arn` - (Optional) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule + +WorkMail actions support the following: + +* `organization_arn` - (Required) The ARN of the WorkMail organization +* `topic_arn` - (Optional) The ARN of an SNS topic to notify +* `position` - (Required) The position of the action in the receipt rule diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown new file mode 100644 index 000000000..d353bbd3e --- /dev/null +++ b/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown @@ -0,0 +1,25 @@ +--- +layout: "aws" +page_title: "AWS: ses_receipt_rule_set" +sidebar_current: "docs-aws-resource-ses-receipt-rule-set" +description: |- + Provides an SES receipt rule set resource +--- + +# aws\_ses\_receipt_rule_set + +Provides an SES receipt rule set resource + +## Example Usage + +``` +resource "aws_ses_receipt_rule_set" "main" { + rule_set_name
= "primary-rules" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `rule_set_name` - (Required) The name of the rule set diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 1aac1417f..be7f97501 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -704,6 +704,30 @@ + > + SES Resources + + + + > SNS Resources From ecadf103ccf38bbefe960059b57ddef8381ded83 Mon Sep 17 00:00:00 2001 From: "Ernest W. Durbin III" Date: Thu, 30 Jun 2016 09:28:24 -0400 Subject: [PATCH 0132/1238] implement flexible resources for ELB Policies allows load balancer policies and their assignment to backend servers or listeners to be configured independently. this gives flexibility to configure additional policies on aws elastic load balancers aside from the already provided "convenience" wrappers for cookie stickiness --- builtin/providers/aws/provider.go | 3 + ...elb_load_balancer_backend_server_policy.go | 139 +++++++ ...oad_balancer_backend_server_policy_test.go | 388 ++++++++++++++++++ ...e_aws_elb_load_balancer_listener_policy.go | 139 +++++++ ..._elb_load_balancer_listener_policy_test.go | 233 +++++++++++ .../resource_aws_elb_load_balancer_policy.go | 350 ++++++++++++++++ ...ource_aws_elb_load_balancer_policy_test.go | 240 +++++++++++ ...lancer_backend_server_policy.html.markdown | 85 ++++ ...oad_balancer_listener_policy.html.markdown | 73 ++++ .../r/elb_load_balancer_policy.html.markdown | 108 +++++ 10 files changed, 1758 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_policy.go create mode 100644 builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go create mode 100644 website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown create mode 100644 website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown create mode 100644 website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 48d7c4077..ce141b005 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -215,6 +215,9 @@ func Provider() terraform.ResourceProvider { "aws_lambda_permission": resourceAwsLambdaPermission(), "aws_launch_configuration": resourceAwsLaunchConfiguration(), "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), + "aws_elb_load_balancer_policy": resourceAwsLoadBalancerPolicy(), + "aws_elb_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), + "aws_elb_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go b/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go new file mode 100644 index 000000000..0bdd85f94 --- /dev/null +++ 
b/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go @@ -0,0 +1,139 @@ +package aws + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerBackendServerPolicies() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerBackendServerPoliciesCreate, + Read: resourceAwsLoadBalancerBackendServerPoliciesRead, + Update: resourceAwsLoadBalancerBackendServerPoliciesCreate, + Delete: resourceAwsLoadBalancerBackendServerPoliciesDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "policy_names": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "instance_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + } +} + +func resourceAwsLoadBalancerBackendServerPoliciesCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName := d.Get("load_balancer_name") + + policyNames := []*string{} + if v, ok := d.GetOk("policy_names"); ok { + policyNames = expandStringList(v.(*schema.Set).List()) + } + + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName.(string)), + InstancePort: aws.Int64(int64(d.Get("instance_port").(int))), + PolicyNames: policyNames, + } + + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.InstancePort, 10))) + d.Set("load_balancer_name", setOpts.LoadBalancerName) + d.Set("instance_port", setOpts.InstancePort) + d.Set("policy_names", flattenStringList(setOpts.PolicyNames)) + return nil +} + +func resourceAwsLoadBalancerBackendServerPoliciesRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, instancePort := resourceAwsLoadBalancerBackendServerPoliciesParseId(d.Id()) + + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return fmt.Errorf("LoadBalancerNotFound: %s", err) + } + } + return fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + policyNames := []*string{} + for _, backendServer := range lb.BackendServerDescriptions { + if instancePort != strconv.Itoa(int(*backendServer.InstancePort)) { + continue + } + + for _, name := range backendServer.PolicyNames { + policyNames = append(policyNames, name) + } + } + + d.Set("load_balancer_name", loadBalancerName) + d.Set("instance_port", instancePort) + d.Set("policy_names", flattenStringList(policyNames)) + + return nil +} + +func resourceAwsLoadBalancerBackendServerPoliciesDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + 
loadBalancerName, instancePort := resourceAwsLoadBalancerBackendServerPoliciesParseId(d.Id()) + + instancePortInt, err := strconv.ParseInt(instancePort, 10, 64) + if err != nil { + return fmt.Errorf("Error parsing instancePort as integer: %s", err) + } + + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(instancePortInt), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) + } + + return nil +} + +func resourceAwsLoadBalancerBackendServerPoliciesParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go b/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go new file mode 100644 index 000000000..0bb3f6934 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go @@ -0,0 +1,388 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + + tlsprovider "github.com/hashicorp/terraform/builtin/providers/tls" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSLoadBalancerBackendServerPolicy_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: map[string]terraform.ResourceProvider{ + "aws": testAccProvider, + "tls": tlsprovider.Provider(), + }, + CheckDestroy: testAccCheckAWSLoadBalancerBackendServerPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic0, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy0"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-backend-auth-policy0"), + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", true), + ), + }, + resource.TestStep{ + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy0"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy1"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-backend-auth-policy0"), + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", true), + ), + }, + resource.TestStep{ + Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", false), + ), + }, + }, + }) +} + +func policyInBackendServerPolicies(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} + +func testAccCheckAWSLoadBalancerBackendServerPolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).elbconn + + for _, rs := range s.RootModule().Resources 
{ + switch { + case rs.Type == "aws_elb_load_balancer_policy": + loadBalancerName, policyName := resourceAwsLoadBalancerBackendServerPoliciesParseId(rs.Primary.ID) + out, err := conn.DescribeLoadBalancerPolicies( + &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { + continue + } + return err + } + if len(out.PolicyDescriptions) > 0 { + return fmt.Errorf("Policy still exists") + } + case rs.Type == "aws_elb_load_balancer_backend_policy": + loadBalancerName, policyName := resourceAwsLoadBalancerBackendServerPoliciesParseId(rs.Primary.ID) + out, err := conn.DescribeLoadBalancers( + &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "LoadBalancerNotFound") { + continue + } + return err + } + for _, backendServer := range out.LoadBalancerDescriptions[0].BackendServerDescriptions { + policyStrings := []string{} + for _, pol := range backendServer.PolicyNames { + policyStrings = append(policyStrings, *pol) + } + if policyInBackendServerPolicies(policyName, policyStrings) { + return fmt.Errorf("Policy still exists and is assigned") + } + } + default: + continue + } + } + return nil +} + +func testAccCheckAWSLoadBalancerBackendServerPolicyState(loadBalancerName string, loadBalancerBackendAuthPolicyName string, assigned bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + elbconn := testAccProvider.Meta().(*AWSClient).elbconn + + loadBalancerDescription, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + }) + if err != nil { + return err + } + + for _, backendServer := range loadBalancerDescription.LoadBalancerDescriptions[0].BackendServerDescriptions { + policyStrings := []string{} + for _, pol := range backendServer.PolicyNames { + policyStrings = append(policyStrings, *pol) + } + if policyInBackendServerPolicies(loadBalancerBackendAuthPolicyName, policyStrings) != assigned { + if assigned { + return fmt.Errorf("Policy no longer assigned %s not in %+v", loadBalancerBackendAuthPolicyName, policyStrings) + } else { + return fmt.Errorf("Policy exists and is assigned") + } + } + } + + return nil + } +} + +const testAccAWSLoadBalancerBackendServerPolicyConfig_basic0 = ` +resource "tls_private_key" "example0" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "test-cert0" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.example0.private_key_pem}" + + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" + } + + validity_period_hours = 12 + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] +} + +resource "aws_iam_server_certificate" "test-iam-cert0" { + name_prefix = "test_cert_" + certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" + private_key = "${tls_private_key.example0.private_key_pem}" +} + +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "https" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "${aws_iam_server_certificate.test-iam-cert0.arn}" + } + + tags { + Name = "tf-acc-test" + } +} + +resource 
"aws_elb_load_balancer_policy" "test-pubkey-policy0" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-pubkey-policy0" + policy_type_name = "PublicKeyPolicyType" + policy_attribute = { + name = "PublicKey" + value = "${replace(replace(replace(tls_private_key.example0.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" + } +} + +resource "aws_elb_load_balancer_policy" "test-backend-auth-policy0" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-backend-auth-policy0" + policy_type_name = "BackendServerAuthenticationPolicyType" + policy_attribute = { + name = "PublicKeyPolicyName" + value = "${aws_elb_load_balancer_policy.test-pubkey-policy0.policy_name}" + } +} + +resource "aws_elb_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { + load_balancer_name = "${aws_elb.test-lb.name}" + instance_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.test-backend-auth-policy0.policy_name}" + ] +} +` + +const testAccAWSLoadBalancerBackendServerPolicyConfig_basic1 = ` +resource "tls_private_key" "example0" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "test-cert0" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.example0.private_key_pem}" + + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" + } + + validity_period_hours = 12 + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] +} + +resource "tls_private_key" "example1" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "test-cert1" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.example1.private_key_pem}" + + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" + } + + validity_period_hours = 12 + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] +} + +resource "aws_iam_server_certificate" "test-iam-cert0" { + name_prefix = "test_cert_" + certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" + private_key = "${tls_private_key.example0.private_key_pem}" +} + +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "https" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "${aws_iam_server_certificate.test-iam-cert0.arn}" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "test-pubkey-policy0" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-pubkey-policy0" + policy_type_name = "PublicKeyPolicyType" + policy_attribute = { + name = "PublicKey" + value = "${replace(replace(replace(tls_private_key.example0.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" + } +} + +resource "aws_elb_load_balancer_policy" "test-pubkey-policy1" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-pubkey-policy1" + policy_type_name = "PublicKeyPolicyType" + policy_attribute = { + name = "PublicKey" + value = "${replace(replace(replace(tls_private_key.example1.public_key_pem, "\n", ""), "-----BEGIN PUBLIC KEY-----", ""), "-----END PUBLIC KEY-----", "")}" + } +} + +resource "aws_elb_load_balancer_policy" "test-backend-auth-policy0" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-backend-auth-policy0" + policy_type_name = "BackendServerAuthenticationPolicyType" + policy_attribute = { + name = 
"PublicKeyPolicyName" + value = "${aws_elb_load_balancer_policy.test-pubkey-policy1.policy_name}" + } +} + +resource "aws_elb_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { + load_balancer_name = "${aws_elb.test-lb.name}" + instance_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.test-backend-auth-policy0.policy_name}" + ] +} +` + +const testAccAWSLoadBalancerBackendServerPolicyConfig_basic2 = ` +resource "tls_private_key" "example0" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "test-cert0" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.example0.private_key_pem}" + + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" + } + + validity_period_hours = 12 + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] +} + +resource "tls_private_key" "example1" { + algorithm = "RSA" +} + +resource "tls_self_signed_cert" "test-cert1" { + key_algorithm = "RSA" + private_key_pem = "${tls_private_key.example1.private_key_pem}" + + subject { + common_name = "example.com" + organization = "ACME Examples, Inc" + } + + validity_period_hours = 12 + + allowed_uses = [ + "key_encipherment", + "digital_signature", + "server_auth", + ] +} + +resource "aws_iam_server_certificate" "test-iam-cert0" { + name_prefix = "test_cert_" + certificate_body = "${tls_self_signed_cert.test-cert0.cert_pem}" + private_key = "${tls_private_key.example0.private_key_pem}" +} + +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "https" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "${aws_iam_server_certificate.test-iam-cert0.arn}" + } + + tags { + Name = "tf-acc-test" + } +} +` diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go b/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go new file mode 100644 index 000000000..494b8bd98 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go @@ -0,0 +1,139 @@ +package aws + +import ( + "fmt" + "strconv" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerListenerPolicies() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerListenerPoliciesCreate, + Read: resourceAwsLoadBalancerListenerPoliciesRead, + Update: resourceAwsLoadBalancerListenerPoliciesCreate, + Delete: resourceAwsLoadBalancerListenerPoliciesDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "policy_names": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + Set: schema.HashString, + }, + + "load_balancer_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + } +} + +func resourceAwsLoadBalancerListenerPoliciesCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName := d.Get("load_balancer_name") + + policyNames := []*string{} + if v, ok := d.GetOk("policy_names"); ok { + policyNames = expandStringList(v.(*schema.Set).List()) + } + + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName.(string)), + LoadBalancerPort: 
aws.Int64(int64(d.Get("load_balancer_port").(int))), + PolicyNames: policyNames, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.LoadBalancerPort, 10))) + d.Set("load_balancer_name", setOpts.LoadBalancerName) + d.Set("load_balancer_port", setOpts.LoadBalancerPort) + d.Set("policy_names", flattenStringList(setOpts.PolicyNames)) + return nil +} + +func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) + + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return fmt.Errorf("LoadBalancerNotFound: %s", err) + } + } + return fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + policyNames := []*string{} + for _, listener := range lb.ListenerDescriptions { + if loadBalancerPort != strconv.Itoa(int(*listener.Listener.LoadBalancerPort)) { + continue + } + + for _, name := range listener.PolicyNames { + policyNames = append(policyNames, name) + } + } + + d.Set("load_balancer_name", loadBalancerName) + d.Set("load_balancer_port", loadBalancerPort) + d.Set("policy_names", flattenStringList(policyNames)) + + return nil +} + +func resourceAwsLoadBalancerListenerPoliciesDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, loadBalancerPort := resourceAwsLoadBalancerListenerPoliciesParseId(d.Id()) + + loadBalancerPortInt, err := strconv.ParseInt(loadBalancerPort, 10, 64) + if err != nil { + return fmt.Errorf("Error parsing loadBalancerPort as integer: %s", err) + } + + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(loadBalancerPortInt), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setOpts); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) + } + + return nil +} + +func resourceAwsLoadBalancerListenerPoliciesParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go b/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go new file mode 100644 index 000000000..4bc7ac17f --- /dev/null +++ b/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go @@ -0,0 +1,233 @@ +package aws + +import ( + "fmt" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSLoadBalancerListenerPolicy_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLoadBalancerListenerPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLoadBalancerListenerPolicyConfig_basic0, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.magic-cookie-sticky"), + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", true), + ), + }, + resource.TestStep{ + Config: testAccAWSLoadBalancerListenerPolicyConfig_basic1, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.magic-cookie-sticky"), + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", true), + ), + }, + resource.TestStep{ + Config: testAccAWSLoadBalancerListenerPolicyConfig_basic2, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", false), + ), + }, + }, + }) +} + +func policyInListenerPolicies(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} + +func testAccCheckAWSLoadBalancerListenerPolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).elbconn + + for _, rs := range s.RootModule().Resources { + switch { + case rs.Type == "aws_elb_load_balancer_policy": + loadBalancerName, policyName := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) + out, err := conn.DescribeLoadBalancerPolicies( + &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { + continue + } + return err + } + if len(out.PolicyDescriptions) > 0 { + return fmt.Errorf("Policy still exists") + } + case rs.Type == "aws_elb_load_listener_policy": + loadBalancerName, _ := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) + out, err := conn.DescribeLoadBalancers( + &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "LoadBalancerNotFound") { + continue + } + return err + } + policyNames := []string{} + for k, _ := range rs.Primary.Attributes { + if strings.HasPrefix(k, "policy_names.") && strings.HasSuffix(k, ".name") { + value_key := fmt.Sprintf("%s.value", strings.TrimSuffix(k, ".name")) + policyNames = append(policyNames, rs.Primary.Attributes[value_key]) + } + } + for _, policyName := range policyNames { + for _, listener := range out.LoadBalancerDescriptions[0].ListenerDescriptions { + policyStrings := []string{} + for _, pol := range listener.PolicyNames { + policyStrings = append(policyStrings, *pol) + } + if policyInListenerPolicies(policyName, policyStrings) { + return fmt.Errorf("Policy still exists and is assigned") + } + } + } + default: + continue + } + } + return nil +} + +func testAccCheckAWSLoadBalancerListenerPolicyState(loadBalancerName string, loadBalancerListenerPort int64, loadBalancerListenerPolicyName string, assigned bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + elbconn := testAccProvider.Meta().(*AWSClient).elbconn + + 
loadBalancerDescription, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + }) + if err != nil { + return err + } + + for _, listener := range loadBalancerDescription.LoadBalancerDescriptions[0].ListenerDescriptions { + if *listener.Listener.LoadBalancerPort != loadBalancerListenerPort { + continue + } + policyStrings := []string{} + for _, pol := range listener.PolicyNames { + policyStrings = append(policyStrings, *pol) + } + if policyInListenerPolicies(loadBalancerListenerPolicyName, policyStrings) != assigned { + if assigned { + return fmt.Errorf("Policy no longer assigned %s not in %+v", loadBalancerListenerPolicyName, policyStrings) + } else { + return fmt.Errorf("Policy exists and is assigned") + } + } + } + + return nil + } +} + +const testAccAWSLoadBalancerListenerPolicyConfig_basic0 = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "magic-cookie-sticky-policy" + policy_type_name = "AppCookieStickinessPolicyType" + policy_attribute = { + name = "CookieName" + value = "magic_cookie" + } +} + +resource "aws_elb_load_balancer_listener_policy" "test-lb-listener-policies-80" { + load_balancer_name = "${aws_elb.test-lb.name}" + load_balancer_port = 80 + policy_names = [ + "${aws_elb_load_balancer_policy.magic-cookie-sticky.policy_name}", + ] +} +` + +const testAccAWSLoadBalancerListenerPolicyConfig_basic1 = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "magic-cookie-sticky-policy" + policy_type_name = "AppCookieStickinessPolicyType" + policy_attribute = { + name = "CookieName" + value = "unicorn_cookie" + } +} + +resource "aws_elb_load_balancer_listener_policy" "test-lb-listener-policies-80" { + load_balancer_name = "${aws_elb.test-lb.name}" + load_balancer_port = 80 + policy_names = [ + "${aws_elb_load_balancer_policy.magic-cookie-sticky.policy_name}" + ] +} +` + +const testAccAWSLoadBalancerListenerPolicyConfig_basic2 = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} +` diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_policy.go b/builtin/providers/aws/resource_aws_elb_load_balancer_policy.go new file mode 100644 index 000000000..1d1219c80 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elb_load_balancer_policy.go @@ -0,0 +1,350 @@ +package aws + +import ( + "fmt" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLoadBalancerPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsLoadBalancerPolicyCreate, + Read: 
resourceAwsLoadBalancerPolicyRead, + Update: resourceAwsLoadBalancerPolicyUpdate, + Delete: resourceAwsLoadBalancerPolicyDelete, + + Schema: map[string]*schema.Schema{ + "load_balancer_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_type_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "policy_attribute": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + }, + } +} + +func resourceAwsLoadBalancerPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + attributes := []*elb.PolicyAttribute{} + if attributedata, ok := d.GetOk("policy_attribute"); ok { + attributeSet := attributedata.(*schema.Set).List() + for _, attribute := range attributeSet { + data := attribute.(map[string]interface{}) + attributes = append(attributes, &elb.PolicyAttribute{ + AttributeName: aws.String(data["name"].(string)), + AttributeValue: aws.String(data["value"].(string)), + }) + } + } + + lbspOpts := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(d.Get("load_balancer_name").(string)), + PolicyName: aws.String(d.Get("policy_name").(string)), + PolicyTypeName: aws.String(d.Get("policy_type_name").(string)), + PolicyAttributes: attributes, + } + + if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { + return fmt.Errorf("Error creating LoadBalancerPolicy: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%s", + *lbspOpts.LoadBalancerName, + *lbspOpts.PolicyName)) + return nil +} + +func resourceAwsLoadBalancerPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) + + request := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + } + + getResp, err := elbconn.DescribeLoadBalancerPolicies(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving policy: %s", err) + } + + if len(getResp.PolicyDescriptions) != 1 { + return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) + } + + policyDesc := getResp.PolicyDescriptions[0] + policyTypeName := policyDesc.PolicyTypeName + policyAttributes := policyDesc.PolicyAttributeDescriptions + + attributes := []map[string]string{} + for _, a := range policyAttributes { + pair := make(map[string]string) + pair["name"] = *a.AttributeName + pair["value"] = *a.AttributeValue + if (*policyTypeName == "SSLNegotiationPolicyType") && (*a.AttributeValue == "false") { + continue + } + attributes = append(attributes, pair) + } + + d.Set("policy_name", policyName) + d.Set("policy_type_name", policyTypeName) + d.Set("load_balancer_name", loadBalancerName) + d.Set("policy_attribute", attributes) + + return nil +} + +func resourceAwsLoadBalancerPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + reassignments := Reassignment{} + + loadBalancerName, policyName := 
resourceAwsLoadBalancerPolicyParseId(d.Id()) + + assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err) + } + + if assigned { + reassignments, err = resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err) + } + } + + request := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyName: aws.String(policyName), + } + + if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { + return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err) + } + + err = resourceAwsLoadBalancerPolicyCreate(d, meta) + + for _, listenerAssignment := range reassignments.listenerPolicies { + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(listenerAssignment); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) + } + } + + for _, backendServerAssignment := range reassignments.backendServerPolicies { + if _, err := elbconn.SetLoadBalancerPoliciesForBackendServer(backendServerAssignment); err != nil { + return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) + } + } + + return nil +} + +func resourceAwsLoadBalancerPolicyDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(d.Id()) + + assigned, err := resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error determining assignment status of Load Balancer Policy %s: %s", policyName, err) + } + + if assigned { + _, err := resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName, elbconn) + if err != nil { + return fmt.Errorf("Error unassigning Load Balancer Policy %s: %s", policyName, err) + } + } + + request := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyName: aws.String(policyName), + } + + if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { + return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err) + } + return nil +} + +func resourceAwsLoadBalancerPolicyParseId(id string) (string, string) { + parts := strings.SplitN(id, ":", 2) + return parts[0], parts[1] +} + +func resourceAwsLoadBalancerPolicyAssigned(policyName, loadBalancerName string, elbconn *elb.ELB) (bool, error) { + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return false, nil + } + } + return false, fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return false, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + assigned := false + for _, backendServer := range lb.BackendServerDescriptions { + for _, name := range backendServer.PolicyNames { + if policyName == *name { + assigned = true + break + } + } + } + + for _, listener := range lb.ListenerDescriptions { + for _, name := range listener.PolicyNames { + if 
policyName == *name { + assigned = true + break + } + } + } + + return assigned, nil +} + +type Reassignment struct { + backendServerPolicies []*elb.SetLoadBalancerPoliciesForBackendServerInput + listenerPolicies []*elb.SetLoadBalancerPoliciesOfListenerInput +} + +func resourceAwsLoadBalancerPolicyUnassign(policyName, loadBalancerName string, elbconn *elb.ELB) (Reassignment, error) { + reassignments := Reassignment{} + + describeElbOpts := &elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(loadBalancerName)}, + } + + describeResp, err := elbconn.DescribeLoadBalancers(describeElbOpts) + + if err != nil { + if ec2err, ok := err.(awserr.Error); ok { + if ec2err.Code() == "LoadBalancerNotFound" { + return reassignments, nil + } + } + return reassignments, fmt.Errorf("Error retrieving ELB description: %s", err) + } + + if len(describeResp.LoadBalancerDescriptions) != 1 { + return reassignments, fmt.Errorf("Unable to find ELB: %#v", describeResp.LoadBalancerDescriptions) + } + + lb := describeResp.LoadBalancerDescriptions[0] + + for _, backendServer := range lb.BackendServerDescriptions { + policies := []*string{} + + for _, name := range backendServer.PolicyNames { + if policyName != *name { + policies = append(policies, name) + } + } + + if len(backendServer.PolicyNames) != len(policies) { + setOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(*backendServer.InstancePort), + PolicyNames: policies, + } + + reassignOpts := &elb.SetLoadBalancerPoliciesForBackendServerInput{ + LoadBalancerName: aws.String(loadBalancerName), + InstancePort: aws.Int64(*backendServer.InstancePort), + PolicyNames: backendServer.PolicyNames, + } + + reassignments.backendServerPolicies = append(reassignments.backendServerPolicies, reassignOpts) + + _, err = elbconn.SetLoadBalancerPoliciesForBackendServer(setOpts) + if err != nil { + return reassignments, fmt.Errorf("Error Setting Load Balancer Policies for Backend Server: %s", err) + } + } + } + + for _, listener := range lb.ListenerDescriptions { + policies := []*string{} + + for _, name := range listener.PolicyNames { + if policyName != *name { + policies = append(policies, name) + } + } + + if len(listener.PolicyNames) != len(policies) { + setOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), + PolicyNames: policies, + } + + reassignOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(loadBalancerName), + LoadBalancerPort: aws.Int64(*listener.Listener.LoadBalancerPort), + PolicyNames: listener.PolicyNames, + } + + reassignments.listenerPolicies = append(reassignments.listenerPolicies, reassignOpts) + + _, err = elbconn.SetLoadBalancerPoliciesOfListener(setOpts) + if err != nil { + return reassignments, fmt.Errorf("Error Setting Load Balancer Policies of Listener: %s", err) + } + } + } + + return reassignments, nil +} diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go b/builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go new file mode 100644 index 000000000..8ce166869 --- /dev/null +++ b/builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go @@ -0,0 +1,240 @@ +package aws + +import ( + "fmt" + "strconv" + "strings" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSLoadBalancerPolicy_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLoadBalancerPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLoadBalancerPolicyConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + ), + }, + }, + }) +} + +func TestAccAWSLoadBalancerPolicy_updateWhileAssigned(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLoadBalancerPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + ), + }, + resource.TestStep{ + Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + ), + }, + }, + }) +} + +func testAccCheckAWSLoadBalancerPolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).elbconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_elb_load_balancer_policy" { + continue + } + + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(rs.Primary.ID) + out, err := conn.DescribeLoadBalancerPolicies( + &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + }) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && (ec2err.Code() == "PolicyNotFound" || ec2err.Code() == "LoadBalancerNotFound") { + continue + } + return err + } + + if len(out.PolicyDescriptions) > 0 { + return fmt.Errorf("Policy still exists") + } + } + return nil +} + +func testAccCheckAWSLoadBalancerPolicyState(elbResource string, policyResource string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[elbResource] + if !ok { + return fmt.Errorf("Not found: %s", elbResource) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + policy, ok := s.RootModule().Resources[policyResource] + if !ok { + return fmt.Errorf("Not found: %s", policyResource) + } + + elbconn := testAccProvider.Meta().(*AWSClient).elbconn + loadBalancerName, policyName := resourceAwsLoadBalancerPolicyParseId(policy.Primary.ID) + loadBalancerPolicies, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(loadBalancerName), + PolicyNames: []*string{aws.String(policyName)}, + }) + + if err != nil { + return err + } + + for _, loadBalancerPolicy := range loadBalancerPolicies.PolicyDescriptions { + if *loadBalancerPolicy.PolicyName == policyName { + if *loadBalancerPolicy.PolicyTypeName != policy.Primary.Attributes["policy_type_name"] { + return fmt.Errorf("PolicyTypeName does not match") + } + policyAttributeCount, err := strconv.Atoi(policy.Primary.Attributes["policy_attribute.#"]) + if err != nil { + return err + } + if len(loadBalancerPolicy.PolicyAttributeDescriptions) != policyAttributeCount { + return 
fmt.Errorf("PolicyAttributeDescriptions length mismatch") + } + policyAttributes := make(map[string]string) + for k, v := range policy.Primary.Attributes { + if strings.HasPrefix(k, "policy_attribute.") && strings.HasSuffix(k, ".name") { + key := v + value_key := fmt.Sprintf("%s.value", strings.TrimSuffix(k, ".name")) + policyAttributes[key] = policy.Primary.Attributes[value_key] + } + } + for _, policyAttribute := range loadBalancerPolicy.PolicyAttributeDescriptions { + if *policyAttribute.AttributeValue != policyAttributes[*policyAttribute.AttributeName] { + return fmt.Errorf("PollicyAttribute Value mismatch %s != %s: %s", *policyAttribute.AttributeValue, policyAttributes[*policyAttribute.AttributeName], policyAttributes) + } + } + } + } + + return nil + } +} + +const testAccAWSLoadBalancerPolicyConfig_basic = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "test-policy" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-policy-policy" + policy_type_name = "AppCookieStickinessPolicyType" + policy_attribute = { + name = "CookieName" + value = "magic_cookie" + } +} +` + +const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0 = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "test-policy" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-policy-policy" + policy_type_name = "AppCookieStickinessPolicyType" + policy_attribute = { + name = "CookieName" + value = "magic_cookie" + } +} + +resource "aws_elb_load_balancer_listener_policy" "test-lb-test-policy-80" { + load_balancer_name = "${aws_elb.test-lb.name}" + load_balancer_port = 80 + policy_names = [ + "${aws_elb_load_balancer_policy.test-policy.policy_name}" + ] +} +` + +const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1 = ` +resource "aws_elb" "test-lb" { + name = "test-aws-elb-policies-lb" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 80 + instance_protocol = "http" + lb_port = 80 + lb_protocol = "http" + } + + tags { + Name = "tf-acc-test" + } +} + +resource "aws_elb_load_balancer_policy" "test-policy" { + load_balancer_name = "${aws_elb.test-lb.name}" + policy_name = "test-policy-policy" + policy_type_name = "AppCookieStickinessPolicyType" + policy_attribute = { + name = "CookieName" + value = "unicorn_cookie" + } +} + +resource "aws_elb_load_balancer_listener_policy" "test-lb-test-policy-80" { + load_balancer_name = "${aws_elb.test-lb.name}" + load_balancer_port = 80 + policy_names = [ + "${aws_elb_load_balancer_policy.test-policy.policy_name}" + ] +} +` diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown b/website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown new file mode 100644 index 000000000..5c06bbdf8 --- /dev/null +++ b/website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown @@ -0,0 +1,85 @@ +--- +layout: "aws" +page_title: "AWS: aws_elb_load_balancer_backend_server_policy" +sidebar_current: 
"docs-aws-resource-elb-load-balancer-backend-server-policy" +description: |- + Attaches a load balancer policy to an ELB backend server. +--- + +# aws\_elb\_load\_balancer\_backend\_server\_policy + +Attaches a load balancer policy to an ELB backend server. + + +## Example Usage + +``` +resource "aws_elb" "wu-tang" { + name = "wu-tang" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "http" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net" + } + + tags { + Name = "wu-tang" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-ca-pubkey-policy" + policy_type_name = "PublicKeyPolicyType" + policy_attribute = { + name = "PublicKey" + value = "${file("wu-tang-pubkey")}" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-root-ca-backend-auth-policy" + policy_type_name = "BackendServerAuthenticationPolicyType" + policy_attribute = { + name = "PublicKeyPolicyName" + value = "${aws_elb_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" + } +} + +resource "aws_elb_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { + load_balancer_name = "${aws_elb.wu-tang.name}" + instance_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" + ] +} +``` + +Where the file `pubkey` in the current directoy contains only the _public key_ of the certificate. + +``` +cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey +``` + +This example shows how to enable backend authentication for an ELB as well as customize the TLS settings. + +## Argument Reference + +The following arguments are supported: + +* `load_balancer_name` - (Required) The load balancer to attach the policy to. +* `policy_names` - (Required) List of Policy Names to apply to the backend server. +* `instance_port` - (Required) The instance port to apply the policy to. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the policy. +* `load_balancer_name` - The load balancer on which the policy is defined. +* `instance_port` - The backend port the policies are applied to diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown b/website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown new file mode 100644 index 000000000..37e388c1b --- /dev/null +++ b/website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown @@ -0,0 +1,73 @@ +--- +layout: "aws" +page_title: "AWS: aws_elb_load_balancer_listener_policy" +sidebar_current: "docs-aws-resource-elb-load-balancer-listener-policy" +description: |- + Attaches a load balancer policy to an ELB Listener. +--- + +# aws\_elb\_load\_balancer\_listener\_policy + +Attaches a load balancer policy to an ELB Listener. 
+ + +## Example Usage + +``` +resource "aws_elb" "wu-tang" { + name = "wu-tang" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "http" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net" + } + + tags { + Name = "wu-tang" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-ssl" + policy_type_name = "SSLNegotiationPolicyType" + policy_attribute = { + name = "ECDHE-ECDSA-AES128-GCM-SHA256" + value = "true" + } + policy_attribute = { + name = "Protocol-TLSv1.2" + value = "true" + } +} + +resource "aws_elb_load_balancer_listener_policy" "wu-tang-listener-policies-443" { + load_balancer_name = "${aws_elb.wu-tang.name}" + load_balancer_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.wu-tang-ssl.policy_name}" + ] +} +``` + +This example shows how to customize the TLS settings of an HTTPS listener. + +## Argument Reference + +The following arguments are supported: + +* `load_balancer_name` - (Required) The load balancer to attach the policy to. +* `load_balancer_port` - (Required) The load balancer listener port to apply the policy to. +* `policy_names` - (Required) List of Policy Names to apply to the listener. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the policy. +* `load_balancer_name` - The load balancer on which the policy is defined. +* `load_balancer_port` - The load balancer listener port the policies are applied to diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown b/website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown new file mode 100644 index 000000000..a5d44676d --- /dev/null +++ b/website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown @@ -0,0 +1,108 @@ +--- +layout: "aws" +page_title: "AWS: aws_elb_load_balancer_policy" +sidebar_current: "docs-aws-resource-elb-load-balancer-policy" +description: |- + Provides a load balancer policy, which can be attached to an ELB listener or backend server. +--- + +# aws\_elb\_load\_balancer\_policy + +Provides a load balancer policy, which can be attached to an ELB listener or backend server.
+ +## Example Usage + +``` +resource "aws_elb" "wu-tang" { + name = "wu-tang" + availability_zones = ["us-east-1a"] + + listener { + instance_port = 443 + instance_protocol = "http" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net" + } + + tags { + Name = "wu-tang" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-ca-pubkey-policy" + policy_type_name = "PublicKeyPolicyType" + policy_attribute = { + name = "PublicKey" + value = "${file("wu-tang-pubkey")}" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-root-ca-backend-auth-policy" + policy_type_name = "BackendServerAuthenticationPolicyType" + policy_attribute = { + name = "PublicKeyPolicyName" + value = "${aws_elb_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" + } +} + +resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { + load_balancer_name = "${aws_elb.wu-tang.name}" + policy_name = "wu-tang-ssl" + policy_type_name = "SSLNegotiationPolicyType" + policy_attribute = { + name = "ECDHE-ECDSA-AES128-GCM-SHA256" + value = "true" + } + policy_attribute = { + name = "Protocol-TLSv1.2" + value = "true" + } +} + +resource "aws_elb_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { + load_balancer_name = "${aws_elb.wu-tang.name}" + instance_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" + ] +} + +resource "aws_elb_load_balancer_listener_policy" "wu-tang-listener-policies-443" { + load_balancer_name = "${aws_elb.wu-tang.name}" + load_balancer_port = 443 + policy_names = [ + "${aws_elb_load_balancer_policy.wu-tang-ssl.policy_name}" + ] +} +``` + +Where the file `pubkey` in the current directoy contains only the _public key_ of the certificate. + +``` +cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey +``` + +This example shows how to enable backend authentication for an ELB as well as customize the TLS settings. + +## Argument Reference + +The following arguments are supported: + +* `load_balancer_name` - (Required) The load balancer on which the policy is defined. +* `policy_name` - (Required) The name of the load balancer policy. +* `policy_type_name` - (Required) The policy type. +* `policy_attribute` - (Optional) Policy attribute to apply to the policy. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the policy. +* `policy_name` - The name of the stickiness policy. +* `policy_type_name` - The policy type of the policy. +* `load_balancer_name` - The load balancer on which the policy is defined. 
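Assuming the AWS CLI is available, the policies created in the example above, along with the attribute values the API actually stored, can be verified with, for example:

```
aws elb describe-load-balancer-policies --load-balancer-name wu-tang
```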
From e812caa249ac1ecd1fd32ca252cf3c5be178ffb1 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 2 Jul 2016 11:51:27 -0600 Subject: [PATCH 0133/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index efa7bf9c1..c9ac576e4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -182,6 +182,7 @@ BUG FIXES: * provider/openstack: Ensure CIDRs Are Lower Case [GH-6864] * provider/openstack: Rebuild Instances On Network Changes [GH-6844] * provider/openstack: Firewall rules are applied in the correct order [GH-7194] + * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups [GH-7468] * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources [GH-6522] * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` [GH-6635] * provider/vsphere: adding a `vsphere_virtual_machine` migration [GH-7023] From 421bda23b003a635cc56673c8686050245dad55e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 3 Jul 2016 11:31:35 +0100 Subject: [PATCH 0134/1238] provider/azurerm: `azurerm_virtual_machine` panic at UnattendConfig (#7453) Guarding against `invalid memory address` in AdditionalUnattendConfig ``` make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMVirtualMachine_windowsUnattendedConfig' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMVirtualMachine_windowsUnattendedConfig -timeout 120m === RUN TestAccAzureRMVirtualMachine_windowsUnattendedConfig --- PASS: TestAccAzureRMVirtualMachine_windowsUnattendedConfig (943.28s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 943.299s ``` --- builtin/providers/azurerm/resource_arm_virtual_machine.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index 456d9bff6..95698b009 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -730,7 +730,10 @@ func flattenAzureRmVirtualMachineOsProfileWindowsConfiguration(config *compute.W c["pass"] = i.PassName c["component"] = i.ComponentName c["setting_name"] = i.SettingName - c["content"] = *i.Content + + if i.Content != nil { + c["content"] = *i.Content + } content = append(content, c) } From 8d8becdfdbe54386bc7123a1e53035ba981c2531 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 3 Jul 2016 09:37:35 -0600 Subject: [PATCH 0135/1238] provider/openstack: Support Import of OpenStack FWaaS Resources (#7471) --- .../import_openstack_fw_firewall_v1_test.go | 29 +++++++++++++++++++ .../import_openstack_fw_policy_v1_test.go | 29 +++++++++++++++++++ .../import_openstack_fw_rule_v1_test.go | 29 +++++++++++++++++++ .../resource_openstack_fw_firewall_v1.go | 6 +++- .../resource_openstack_fw_policy_v1.go | 7 ++++- .../resource_openstack_fw_rule_v1.go | 7 +++-- 6 files changed, 103 insertions(+), 4 deletions(-) create mode 100644 builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go create mode 100644 builtin/providers/openstack/import_openstack_fw_policy_v1_test.go create mode 100644 builtin/providers/openstack/import_openstack_fw_rule_v1_test.go diff --git a/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go 
b/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go new file mode 100644 index 000000000..771ccae85 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_fw_firewall_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackFWFirewallV1_importBasic(t *testing.T) { + resourceName := "openstack_fw_firewall_v1.accept_test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWFirewallV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go b/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go new file mode 100644 index 000000000..2cbbbed79 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_fw_policy_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackFWPolicyV1_importBasic(t *testing.T) { + resourceName := "openstack_fw_policy_v1.accept_test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWPolicyV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallPolicyConfigAddRules, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go b/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go new file mode 100644 index 000000000..f1d612510 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_fw_rule_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackFWRuleV1_importBasic(t *testing.T) { + resourceName := "openstack_fw_rule_v1.accept_test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWRuleV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallRuleConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go b/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go index 44c93a4c8..d43313763 100644 --- a/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go +++ b/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go @@ -17,6 +17,9 @@ func resourceFWFirewallV1() *schema.Resource { Read: resourceFWFirewallV1Read, Update: resourceFWFirewallV1Update, Delete: resourceFWFirewallV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -105,11 +108,12 @@ func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { } firewall, err := firewalls.Get(networkingClient, d.Id()).Extract() - if err != nil { return 
CheckDeleted(d, err, "firewall") } + log.Printf("[DEBUG] Read OpenStack Firewall %s: %#v", d.Id(), firewall) + d.Set("name", firewall.Name) d.Set("description", firewall.Description) d.Set("policy_id", firewall.PolicyID) diff --git a/builtin/providers/openstack/resource_openstack_fw_policy_v1.go b/builtin/providers/openstack/resource_openstack_fw_policy_v1.go index ab273f8d9..339f7fd2b 100644 --- a/builtin/providers/openstack/resource_openstack_fw_policy_v1.go +++ b/builtin/providers/openstack/resource_openstack_fw_policy_v1.go @@ -16,6 +16,9 @@ func resourceFWPolicyV1() *schema.Resource { Read: resourceFWPolicyV1Read, Update: resourceFWPolicyV1Update, Delete: resourceFWPolicyV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -111,16 +114,18 @@ func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { } policy, err := policies.Get(networkingClient, d.Id()).Extract() - if err != nil { return CheckDeleted(d, err, "FW policy") } + log.Printf("[DEBUG] Read OpenStack Firewall Policy %s: %#v", d.Id(), policy) + d.Set("name", policy.Name) d.Set("description", policy.Description) d.Set("shared", policy.Shared) d.Set("audited", policy.Audited) d.Set("tenant_id", policy.TenantID) + d.Set("rules", policy.Rules) return nil } diff --git a/builtin/providers/openstack/resource_openstack_fw_rule_v1.go b/builtin/providers/openstack/resource_openstack_fw_rule_v1.go index 15590f019..e336a60eb 100644 --- a/builtin/providers/openstack/resource_openstack_fw_rule_v1.go +++ b/builtin/providers/openstack/resource_openstack_fw_rule_v1.go @@ -15,6 +15,9 @@ func resourceFWRuleV1() *schema.Resource { Read: resourceFWRuleV1Read, Update: resourceFWRuleV1Update, Delete: resourceFWRuleV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -123,14 +126,14 @@ func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { } rule, err := rules.Get(networkingClient, d.Id()).Extract() - if err != nil { return CheckDeleted(d, err, "FW rule") } + log.Printf("[DEBUG] Read OpenStack Firewall Rule %s: %#v", d.Id(), rule) + d.Set("protocol", rule.Protocol) d.Set("action", rule.Action) - d.Set("name", rule.Name) d.Set("description", rule.Description) d.Set("ip_version", rule.IPVersion) From c1e4d297f34efc9d8e8b8bda1a49fa2fbeb2adee Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 3 Jul 2016 09:37:58 -0600 Subject: [PATCH 0136/1238] provider/openstack: Support Import openstack_compute_secgroup_v2 (#7350) --- ...port_openstack_compute_secgroup_v2_test.go | 29 +++++++++++++++++++ .../resource_openstack_compute_secgroup_v2.go | 3 ++ 2 files changed, 32 insertions(+) create mode 100644 builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go b/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go new file mode 100644 index 000000000..babb02426 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_compute_secgroup_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeV2SecGroup_importBasic(t *testing.T) { + resourceName := "openstack_compute_secgroup_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testAccCheckComputeV2SecGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2SecGroup_basic_orig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go index 996a0ac7e..e6f1585fd 100644 --- a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go @@ -20,6 +20,9 @@ func resourceComputeSecGroupV2() *schema.Resource { Read: resourceComputeSecGroupV2Read, Update: resourceComputeSecGroupV2Update, Delete: resourceComputeSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ From 04665206265c063ae6e53f1fef1cf76a346431a7 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 3 Jul 2016 09:38:21 -0600 Subject: [PATCH 0137/1238] provider/openstack: Support Import openstack_compute_servergroup_v2 (#7349) --- ...t_openstack_compute_servergroup_v2_test.go | 29 +++++++++++++++++++ ...source_openstack_compute_servergroup_v2.go | 3 ++ 2 files changed, 32 insertions(+) create mode 100644 builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go b/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go new file mode 100644 index 000000000..5963ba950 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_compute_servergroup_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeV2ServerGroup_importBasic(t *testing.T) { + resourceName := "openstack_compute_servergroup_v2.mysg" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2ServerGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2ServerGroup_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go index 64cc61ff2..c0632d0d8 100644 --- a/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_servergroup_v2.go @@ -14,6 +14,9 @@ func resourceComputeServerGroupV2() *schema.Resource { Read: resourceComputeServerGroupV2Read, Update: nil, Delete: resourceComputeServerGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ From 713c0daa520a99c4afc99cbf368e6ee4949880ef Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 3 Jul 2016 09:38:38 -0600 Subject: [PATCH 0138/1238] provider/openstack: Support Import openstack_compute_floatingip_v2 (#7348) --- ...rt_openstack_compute_floatingip_v2_test.go | 29 +++++++++++++++++++ ...esource_openstack_compute_floatingip_v2.go | 3 ++ 2 files changed, 32 insertions(+) create mode 100644 
builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go b/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go new file mode 100644 index 000000000..12fef46d3 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_compute_floatingip_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeV2FloatingIP_importBasic(t *testing.T) { + resourceName := "openstack_compute_floatingip_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2FloatingIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2FloatingIP_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go index 731b5f941..46f23ead6 100644 --- a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go @@ -14,6 +14,9 @@ func resourceComputeFloatingIPV2() *schema.Resource { Read: resourceComputeFloatingIPV2Read, Update: nil, Delete: resourceComputeFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ From 480542d2aba72c47650e4429512b3e37faf5ebc0 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 3 Jul 2016 09:39:20 -0600 Subject: [PATCH 0139/1238] provider/openstack: Support Import openstack_compute_keypair_v2 (#7346) --- ...mport_openstack_compute_keypair_v2_test.go | 29 +++++++++++++++++++ .../resource_openstack_compute_keypair_v2.go | 3 ++ ...ource_openstack_compute_keypair_v2_test.go | 4 +-- 3 files changed, 33 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go b/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go new file mode 100644 index 000000000..efa87c695 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_compute_keypair_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackComputeV2Keypair_importBasic(t *testing.T) { + resourceName := "openstack_compute_keypair_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2KeypairDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Keypair_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go index dcc476bd4..a5c111724 100644 --- a/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go +++ 
b/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go @@ -13,6 +13,9 @@ func resourceComputeKeypairV2() *schema.Resource { Create: resourceComputeKeypairV2Create, Read: resourceComputeKeypairV2Read, Delete: resourceComputeKeypairV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go index da090bcd8..25ba2419e 100644 --- a/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go @@ -83,8 +83,6 @@ func testAccCheckComputeV2KeypairExists(t *testing.T, n string, kp *keypairs.Key var testAccComputeV2Keypair_basic = fmt.Sprintf(` resource "openstack_compute_keypair_v2" "foo" { - region = "%s" name = "test-keypair-tf" public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLo1BCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAT9+OfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZquwhvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TAIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIFuu1p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB jrp-hp-pc" - }`, - OS_REGION_NAME) + }`) From c744bb6fa85b98a345bf6dc63ae4153ebb297e13 Mon Sep 17 00:00:00 2001 From: David Harris Date: Sun, 3 Jul 2016 09:42:12 -0600 Subject: [PATCH 0140/1238] provider/aws: import Elastic Beanstalk Application and Environment (#7444) --- ..._aws_elastic_beanstalk_application_test.go | 28 +++++++++++++++++++ ..._aws_elastic_beanstalk_environment_test.go | 28 +++++++++++++++++++ ...ource_aws_elastic_beanstalk_application.go | 4 +++ ...ource_aws_elastic_beanstalk_environment.go | 28 +++++++++++++++---- 4 files changed, 82 insertions(+), 6 deletions(-) create mode 100644 builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go create mode 100644 builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go diff --git a/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go b/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go new file mode 100644 index 000000000..f5b5abc6c --- /dev/null +++ b/builtin/providers/aws/import_aws_elastic_beanstalk_application_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAWSElasticBeanstalkApplication_importBasic(t *testing.T) { + resourceName := "aws_elastic_beanstalk_application.tftest" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBeanstalkAppDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBeanstalkAppConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go new file mode 100644 index 000000000..d29a5936f --- /dev/null +++ b/builtin/providers/aws/import_aws_elastic_beanstalk_environment_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAWSElasticBeanstalkEnvironment_importBasic(t *testing.T) { + 
resourceName := "aws_elastic_beanstalk_application.tftest" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBeanstalkAppDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBeanstalkEnvConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go index 8f41b0f26..212332526 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_application.go @@ -19,6 +19,9 @@ func resourceAwsElasticBeanstalkApplication() *schema.Resource { Read: resourceAwsElasticBeanstalkApplicationRead, Update: resourceAwsElasticBeanstalkApplicationUpdate, Delete: resourceAwsElasticBeanstalkApplicationDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -94,6 +97,7 @@ func resourceAwsElasticBeanstalkApplicationRead(d *schema.ResourceData, meta int return err } + d.Set("name", a.ApplicationName) d.Set("description", a.Description) return nil } diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 196ced095..5e5cb0c06 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -46,6 +46,9 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { Read: resourceAwsElasticBeanstalkEnvironmentRead, Update: resourceAwsElasticBeanstalkEnvironmentUpdate, Delete: resourceAwsElasticBeanstalkEnvironmentDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, SchemaVersion: 1, MigrateState: resourceAwsElasticBeanstalkEnvironmentMigrateState, @@ -339,15 +342,12 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticbeanstalkconn - app := d.Get("application").(string) envId := d.Id() - tier := d.Get("tier").(string) log.Printf("[DEBUG] Elastic Beanstalk environment read %s: id %s", d.Get("name").(string), d.Id()) resp, err := conn.DescribeEnvironments(&elasticbeanstalk.DescribeEnvironmentsInput{ - ApplicationName: aws.String(app), - EnvironmentIds: []*string{aws.String(envId)}, + EnvironmentIds: []*string{aws.String(envId)}, }) if err != nil { @@ -380,6 +380,14 @@ func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta int return err } + if err := d.Set("name", env.EnvironmentName); err != nil { + return err + } + + if err := d.Set("application", env.ApplicationName); err != nil { + return err + } + if err := d.Set("description", env.Description); err != nil { return err } @@ -388,8 +396,12 @@ func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta int return err } - if tier == "WebServer" && env.CNAME != nil { - beanstalkCnamePrefixRegexp := regexp.MustCompile(`(^[^.]+).\w{2}-\w{4,9}-\d.elasticbeanstalk.com$`) + if err := d.Set("tier", *env.Tier.Name); err != nil { + return err + } + + if env.CNAME != nil { + beanstalkCnamePrefixRegexp := 
regexp.MustCompile(`(^[^.]+)(.\w{2}-\w{4,9}-\d)?.elasticbeanstalk.com$`) var cnamePrefix string cnamePrefixMatch := beanstalkCnamePrefixRegexp.FindStringSubmatch(*env.CNAME) @@ -402,6 +414,10 @@ func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta int if err := d.Set("cname_prefix", cnamePrefix); err != nil { return err } + } else { + if err := d.Set("cname_prefix", ""); err != nil { + return err + } } if err := d.Set("autoscaling_groups", flattenBeanstalkAsg(resources.EnvironmentResources.AutoScalingGroups)); err != nil { From 32051156c76fd284938aa63843b735e2cd19f0cd Mon Sep 17 00:00:00 2001 From: Eric Rutherford Date: Sun, 3 Jul 2016 10:58:19 -0500 Subject: [PATCH 0141/1238] adding states that should be considered as deleted for vpc peering (#7466) --- builtin/providers/aws/resource_aws_vpc_peering_connection.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_vpc_peering_connection.go b/builtin/providers/aws/resource_aws_vpc_peering_connection.go index 2bfc341b6..c712e4574 100644 --- a/builtin/providers/aws/resource_aws_vpc_peering_connection.go +++ b/builtin/providers/aws/resource_aws_vpc_peering_connection.go @@ -108,7 +108,7 @@ func resourceAwsVPCPeeringRead(d *schema.ResourceData, meta interface{}) error { // connection is gone. Destruction isn't allowed, and it eventually // just "falls off" the console. See GH-2322 if pc.Status != nil { - if *pc.Status.Code == "failed" || *pc.Status.Code == "deleted" { + if *pc.Status.Code == "failed" || *pc.Status.Code == "deleted" || *pc.Status.Code == "rejected" || *pc.Status.Code == "deleting" || *pc.Status.Code == "expired" { log.Printf("[DEBUG] VPC Peering Connect (%s) in state (%s), removing", d.Id(), *pc.Status.Code) d.SetId("") return nil From bf63dcc172c2edcd09ca17e43ca13148d01b8c33 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 3 Jul 2016 16:59:39 +0100 Subject: [PATCH 0142/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9ac576e4..83de62529 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -162,6 +162,7 @@ BUG FIXES: * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB [GH-7188] * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` [GH-7219] * provider/aws: Set Elastic Beanstalk stack name back to state [GH-7445] + * provider/aws: Allow recreation of VPC Peering Connection when state is rejected [GH-7466] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From f04d8befd5c56bdaa70504782f3d67cccab3aa11 Mon Sep 17 00:00:00 2001 From: Chris McKeown Date: Tue, 5 Jul 2016 10:46:14 +0100 Subject: [PATCH 0143/1238] Corrected some ARM documentation --- .../providers/azurerm/r/template_deployment.html.markdown | 7 +++++-- .../docs/providers/azurerm/r/virtual_network.html.markdown | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown index 4357a7616..f475fd3c0 100644 --- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown +++ 
b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown @@ -83,10 +83,13 @@ The following arguments are supported: * `name` - (Required) Specifies the name of the template deployment. Changing this forces a new resource to be created. * `resource_group_name` - (Required) The name of the resource group in which to - create the template deployment. + create the template deployment. +* `deployment_mode` - (Required) Specifies the mode that is used to deploy resources. This value could be either `Incremental` or `Complete`. + Note that you will almost *always* want this to be set to `Incremental` otherwise the deployment will destroy all infrastructure not + specified within the template, and Terraform will not be aware of this. * `template_body` - (Optional) Specifies the JSON definition for the template. * `parameters` - (Optional) Specifies the name and value pairs that define the deployment parameters for the template. -* `deployment_mode` - (Optional) Specifies the mode that is used to deploy resources. This value could be either `Incremental` or `Complete`. + ## Attributes Reference diff --git a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown index e9a4eb2ff..6d75cead6 100644 --- a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown @@ -19,6 +19,7 @@ resource "azurerm_virtual_network" "test" { resource_group_name = "${azurerm_resource_group.test.name}" address_space = ["10.0.0.0/16"] location = "West US" + dns_servers = ["10.0.0.4", "10.0.0.5"] subnet { name = "subnet1" @@ -58,8 +59,7 @@ The following arguments are supported: * `location` - (Required) The location/region where the virtual network is created. Changing this forces a new resource to be created. -* `dns_servers` - (Optional) List of names of DNS servers previously registered - on Azure. +* `dns_servers` - (Optional) List of IP addresses of DNS servers * `subnet` - (Optional) Can be specified multiple times to define multiple subnets. Each `subnet` block supports fields documented below. From e0ea51636094267783fb19dc439ca29631a6a8fa Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 5 Jul 2016 10:46:36 +0100 Subject: [PATCH 0144/1238] aws: AMI data source docs fixed (#7487) --- .../source/docs/providers/aws/d/ami.html.markdown | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/docs/providers/aws/d/ami.html.markdown b/website/source/docs/providers/aws/d/ami.html.markdown index b405f6b78..8df0b12c1 100644 --- a/website/source/docs/providers/aws/d/ami.html.markdown +++ b/website/source/docs/providers/aws/d/ami.html.markdown @@ -31,22 +31,22 @@ data "aws_ami" "nat_ami" { ## Argument Reference - * `most_recent` (optional): If more than one result is returned, use the most +* `most_recent` - (Optional) If more than one result is returned, use the most recent AMI. - * `executable_users`: Limit search to users with *explicit* launch permission on +* `executable_users` - (Optional) Limit search to users with *explicit* launch permission on the image. Valid items are the numeric account ID or `self`. -* `filter`: One or more name/value pairs to filter off of. There are +* `filter` - (Optional) One or more name/value pairs to filter off of. There are several valid keys, for a full reference, check out [describe-images in the AWS CLI reference][1]. 
-* `owners`: Limit search to specific AMI owners. Valid items are the numeric +* `owners` - (Optional) Limit search to specific AMI owners. Valid items are the numeric account ID, `amazon`, or `self`. -~> **NOTE:** one of `executable_users`, `filter`, or `owners` must be specified. +~> **NOTE:** At least one of `executable_users`, `filter`, or `owners` must be specified. -~> **NOTE:** if more or less than a single match is returned by the search, +~> **NOTE:** If more or less than a single match is returned by the search, Terraform will fail. Ensure that your search is specific enough to return a single AMI ID only, or use `most_recent` to choose the most recent one. @@ -55,7 +55,7 @@ a single AMI ID only, or use `most_recent` to choose the most recent one. `id` is set to the ID of the found AMI. In addition, the following attributes are exported: -~> **NOTE:** some values are not always set and may not be available for +~> **NOTE:** Some values are not always set and may not be available for interpolation. * `architecture` - The OS architecture of the AMI (ie: `i368` or `x86_64`). From 120d7c4f2e2caf67117debd28e7c2bdd58962f8d Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 5 Jul 2016 10:56:17 +0100 Subject: [PATCH 0145/1238] provider/aws: Only allow max. 1 health_check block for ELB --- builtin/providers/aws/resource_aws_elb.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elb.go b/builtin/providers/aws/resource_aws_elb.go index 11016525b..422b527d1 100644 --- a/builtin/providers/aws/resource_aws_elb.go +++ b/builtin/providers/aws/resource_aws_elb.go @@ -169,6 +169,7 @@ func resourceAwsElb() *schema.Resource { Type: schema.TypeList, Optional: true, Computed: true, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "healthy_threshold": &schema.Schema{ @@ -590,9 +591,7 @@ func resourceAwsElbUpdate(d *schema.ResourceData, meta interface{}) error { if d.HasChange("health_check") { hc := d.Get("health_check").([]interface{}) - if len(hc) > 1 { - return fmt.Errorf("Only one health check per ELB is supported") - } else if len(hc) > 0 { + if len(hc) > 0 { check := hc[0].(map[string]interface{}) configureHealthCheckOpts := elb.ConfigureHealthCheckInput{ LoadBalancerName: aws.String(d.Id()), From 5b98aa739625767e2254c9fa3145cb1b506cf14a Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 5 Jul 2016 12:46:05 +0100 Subject: [PATCH 0146/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 83de62529..6307ed602 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ IMPROVEMENTS: * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster [GH-6795] * provider/aws: Add support for S3 Bucket Acceleration [GH-6628] * provider/aws: Add support for `kms_key_id` to `aws_db_instance` [GH-6651] + * provider/aws: Specifying more than one health check on an `aws_elb` fails with an error prior to making an API request [GH-7489] * provider/aws: Add support to `aws_redshift_cluster` for `iam_roles` [GH-6647] * provider/aws: SQS use raw policy string if compact fails [GH-6724] * provider/aws: Set default description to "Managed by Terraform" [GH-6104] From 06ff7bfcfe125aa5444b876896cade4707d5e0c1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 5 Jul 2016 13:56:05 +0100 Subject: [PATCH 0147/1238] provider/aws: Remove EFS File System from State when NotFound (#7437) Fixes #7433 When an EFS File System is created via 
Terraform, Deleted from the AWS console, then Terraform would give us as error as: ``` * aws_efs_file_system.file_system: FileSystemNotFound: File system 'fs-9d739e54' does not exist. status code: 404, request id: d505a682-3ec7-11e6-81d3-1d41202f0881 ``` On a 404, we now remove the EFS File System from state so that Terraform can recreate it as expected --- builtin/providers/aws/resource_aws_efs_file_system.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/builtin/providers/aws/resource_aws_efs_file_system.go b/builtin/providers/aws/resource_aws_efs_file_system.go index 3fda50933..90a34143d 100644 --- a/builtin/providers/aws/resource_aws_efs_file_system.go +++ b/builtin/providers/aws/resource_aws_efs_file_system.go @@ -100,6 +100,11 @@ func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) erro FileSystemId: aws.String(d.Id()), }) if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "FileSystemNotFound" { + log.Printf("[WARN] EFS File System (%s) not found, error code (404)", d.Id()) + d.SetId("") + return nil + } return err } if len(resp.FileSystems) < 1 { From 689e93957a4ee0c8ebd1adfe712278d078043e33 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 5 Jul 2016 13:56:41 +0100 Subject: [PATCH 0148/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6307ed602..7c1cd9053 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -164,6 +164,7 @@ BUG FIXES: * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` [GH-7219] * provider/aws: Set Elastic Beanstalk stack name back to state [GH-7445] * provider/aws: Allow recreation of VPC Peering Connection when state is rejected [GH-7466] + * provider/aws: Remove EFS File System from State when NotFound [GH-7437] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 58c324676dc86150425bf4c511362455295907b1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 5 Jul 2016 15:50:48 +0100 Subject: [PATCH 0149/1238] provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` (#7181) This fixes #7157. It doesn't change the way aws_ami works ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAMICopy' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAMICopy -timeout 120m === RUN TestAccAWSAMICopy --- PASS: TestAccAWSAMICopy (479.75s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 479.769s ``` --- .../providers/aws/resource_aws_ami_copy.go | 19 +++++++++++++++++++ .../docs/providers/aws/r/ami.html.markdown | 5 +++++ 2 files changed, 24 insertions(+) diff --git a/builtin/providers/aws/resource_aws_ami_copy.go b/builtin/providers/aws/resource_aws_ami_copy.go index 521a1b83a..e02d7c370 100644 --- a/builtin/providers/aws/resource_aws_ami_copy.go +++ b/builtin/providers/aws/resource_aws_ami_copy.go @@ -24,6 +24,20 @@ func resourceAwsAmiCopy() *schema.Resource { ForceNew: true, } + resourceSchema["encrypted"] = &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + } + + resourceSchema["kms_key_id"] = &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + } + return &schema.Resource{ Create: resourceAwsAmiCopyCreate, @@ -45,6 +59,11 @@ func resourceAwsAmiCopyCreate(d *schema.ResourceData, meta interface{}) error { Description: aws.String(d.Get("description").(string)), SourceImageId: aws.String(d.Get("source_ami_id").(string)), SourceRegion: aws.String(d.Get("source_ami_region").(string)), + Encrypted: aws.Bool(d.Get("encrypted").(bool)), + } + + if v, ok := d.GetOk("kms_key_id"); ok { + req.KmsKeyId = aws.String(v.(string)) } res, err := client.CopyImage(req) diff --git a/website/source/docs/providers/aws/r/ami.html.markdown b/website/source/docs/providers/aws/r/ami.html.markdown index d9d6cd5d6..25ac04db6 100644 --- a/website/source/docs/providers/aws/r/ami.html.markdown +++ b/website/source/docs/providers/aws/r/ami.html.markdown @@ -78,6 +78,11 @@ Nested `ebs_block_device` blocks have the following structure: as the selected snapshot. * `volume_type` - (Optional) The type of EBS volume to create. Can be one of "standard" (the default), "io1" or "gp2". +* `encrypted` - (Optional) Specifies whether the destination snapshots of the copied image should be encrypted. +The default CMK for EBS is used unless a non-default AWS Key Management Service (AWS KMS) CMK is specified with KmsKeyId. +* `kms_key_id` - (Optional) The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of +an image during a copy operation. 
This parameter is only required if you want to use a non-default CMK; +if this parameter is not specified, the default CMK for EBS is used Nested `ephemeral_block_device` blocks have the following structure: From b36c6fceb095ba0dfedc1b0ae80650bf2ef28c28 Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 5 Jul 2016 09:51:12 -0500 Subject: [PATCH 0150/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7c1cd9053..aac2a64d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,7 @@ IMPROVEMENTS: * provider/aws: Add inplace edit/update DB Security Group Rule Ingress [GH-7245] * provider/aws: Added support for redshift destination to firehose delivery streams [GH-7375] * provider/aws: Allow `aws_redshift_security_group` ingress rules to change [GH-5939] + * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` [GH-7181] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From d60594695fb0356cbbc6a2e8dd3b16b71969229d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 5 Jul 2016 16:34:06 +0100 Subject: [PATCH 0151/1238] provider/aws: `aws_customer_gateway` refreshing from state on deleted (#7482) state Fixes #7136 --- builtin/providers/aws/resource_aws_customer_gateway.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/builtin/providers/aws/resource_aws_customer_gateway.go b/builtin/providers/aws/resource_aws_customer_gateway.go index c7e386161..a9785477e 100644 --- a/builtin/providers/aws/resource_aws_customer_gateway.go +++ b/builtin/providers/aws/resource_aws_customer_gateway.go @@ -148,6 +148,12 @@ func resourceAwsCustomerGatewayRead(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("[ERROR] Error finding CustomerGateway: %s", d.Id()) } + if *resp.CustomerGateways[0].State == "deleted" { + log.Printf("[INFO] Customer Gateway is in `deleted` state: %s", d.Id()) + d.SetId("") + return nil + } + customerGateway := resp.CustomerGateways[0] d.Set("ip_address", customerGateway.IpAddress) d.Set("type", customerGateway.Type) From 3a177093c0cc5d2ee1880d88e05a2e38e76db30b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 5 Jul 2016 16:34:41 +0100 Subject: [PATCH 0152/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aac2a64d2..9c4e2c4f5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -166,6 +166,7 @@ BUG FIXES: * provider/aws: Set Elastic Beanstalk stack name back to state [GH-7445] * provider/aws: Allow recreation of VPC Peering Connection when state is rejected [GH-7466] * provider/aws: Remove EFS File System from State when NotFound [GH-7437] + * provider/aws: `aws_customer_gateway` refreshing from state on deleted state [GH-7482] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 4ba75bd54e175cd04ee7b60545a6dd10e0e82179 Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Tue, 5 Jul 2016 13:04:09 -0700 Subject: [PATCH 0153/1238] Retry finding route after creating it (#7463) The symptom is that a route 
"fails" to create, then every subsequent `terraform apply` fails with RouteAlreadyExists. CreateRoute was succeeding but the very next DescribeRouteTables was not listing the new route. --- builtin/providers/aws/resource_aws_route.go | 22 +++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/builtin/providers/aws/resource_aws_route.go b/builtin/providers/aws/resource_aws_route.go index 2cfc19e16..9f090fad6 100644 --- a/builtin/providers/aws/resource_aws_route.go +++ b/builtin/providers/aws/resource_aws_route.go @@ -179,14 +179,18 @@ func resourceAwsRouteCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error creating route: %s", err) } - route, err := findResourceRoute(conn, d.Get("route_table_id").(string), d.Get("destination_cidr_block").(string)) + var route *ec2.Route + err = resource.Retry(15*time.Second, func() *resource.RetryError { + route, err = findResourceRoute(conn, d.Get("route_table_id").(string), d.Get("destination_cidr_block").(string)) + return resource.RetryableError(err) + }) if err != nil { - return err + return fmt.Errorf("Error finding route after creating it: %s", err) } d.SetId(routeIDHash(d, route)) - - return resourceAwsRouteRead(d, meta) + resourceAwsRouteSetResourceData(d, route) + return nil } func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error { @@ -195,7 +199,11 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } + resourceAwsRouteSetResourceData(d, route) + return nil +} +func resourceAwsRouteSetResourceData(d *schema.ResourceData, route *ec2.Route) { d.Set("destination_prefix_list_id", route.DestinationPrefixListId) d.Set("gateway_id", route.GatewayId) d.Set("nat_gateway_id", route.NatGatewayId) @@ -205,8 +213,6 @@ func resourceAwsRouteRead(d *schema.ResourceData, meta interface{}) error { d.Set("origin", route.Origin) d.Set("state", route.State) d.Set("vpc_peering_connection_id", route.VpcPeeringConnectionId) - - return nil } func resourceAwsRouteUpdate(d *schema.ResourceData, meta interface{}) error { @@ -380,7 +386,7 @@ func findResourceRoute(conn *ec2.EC2, rtbid string, cidr string) (*ec2.Route, er } } - return nil, fmt.Errorf(` -error finding matching route for Route table (%s) and destination CIDR block (%s)`, + return nil, fmt.Errorf( + `error finding matching route for Route table (%s) and destination CIDR block (%s)`, rtbid, cidr) } From d2c698ea283a841a45e64f855f46847c450f008a Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 5 Jul 2016 15:04:54 -0500 Subject: [PATCH 0154/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9c4e2c4f5..7dab8b8ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -167,6 +167,7 @@ BUG FIXES: * provider/aws: Allow recreation of VPC Peering Connection when state is rejected [GH-7466] * provider/aws: Remove EFS File System from State when NotFound [GH-7437] * provider/aws: `aws_customer_gateway` refreshing from state on deleted state [GH-7482] + * provider/aws: Retry finding `aws_route` after creating it [GH-7463] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From de0a34fc3517893a5078f6358ca9523cd4c63490 Mon Sep 17 00:00:00 2001 From: 
James Bardin Date: Tue, 5 Jul 2016 18:07:47 -0400 Subject: [PATCH 0155/1238] Update CHANGELOG.md Add core bug fixes --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7dab8b8ae..71f9251ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,6 +141,12 @@ BUG FIXES: * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules [GH-6833] * core: Fix "diffs didn't match during apply" error for computed sets [GH-7205] * core: Fix issue where `terraform init .` would truncate existing files [GH-7273] + * core: Don't compare diffs between maps with computed values [GH-7249] + * core: Don't copy existing files over themselves when fetching modules [GH-7273] + * core: Always increment the state serial number when upgrading the version [GH-7402] + * core: Fix a crash during eval when we're upgrading an empty state [GH-7403] + * core: Honor the `-state-out` flag when applying with a plan file [GH-7443] + * core: Fix a panic when a `terraform_remote_state` data source doesn't exist [GH-7464] * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources [GH-6829] * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation [GH-7227] * provider/aws: Fix crash in `aws_elasticache_parameter_group` occuring following edits in the console [GH-6687] From 26cf9e807a49f78cda4dd36ffa1e7c803d0474af Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 6 Jul 2016 09:21:49 +0100 Subject: [PATCH 0156/1238] aws/docs: Add rds_cluster_instance undocumented fields (#7501) --- .../docs/providers/aws/r/rds_cluster_instance.html.markdown | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 94be8d821..dfca6cdbc 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -62,6 +62,10 @@ and memory, see [Scaling Aurora DB Instances][4]. Aurora currently Default `false`. See the documentation on [Creating DB Instances][6] for more details on controlling this property. * `db_subnet_group_name` - (Required if `publicly_accessible = false`, Optional otherwise) A DB subnet group to associate with this DB instance. **NOTE:** This must match the `db_subnet_group_name` of the attached [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html). +* `db_parameter_group_name` - (Optional) The name of the DB parameter group to associate with this instance. +* `apply_immediately` - (Optional) Specifies whether any database modifications + are applied immediately, or during the next maintenance window. Default is`false`. +* `tags` - (Optional) A mapping of tags to assign to the instance. 
## Attributes Reference From eb70dec2ec98d20f45b07f7e4411ae1a55830477 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 6 Jul 2016 11:28:13 +0200 Subject: [PATCH 0157/1238] Satify my OCD a little :wink: (#7502) --- .../docs/providers/cloudstack/r/network.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/cloudstack/r/network.html.markdown b/website/source/docs/providers/cloudstack/r/network.html.markdown index b89c7e1c7..961edf4b1 100644 --- a/website/source/docs/providers/cloudstack/r/network.html.markdown +++ b/website/source/docs/providers/cloudstack/r/network.html.markdown @@ -34,15 +34,15 @@ The following arguments are supported: * `cidr` - (Required) The CIDR block for the network. Changing this forces a new resource to be created. +* `gateway` - (Optional) Gateway that will be provided to the instances in this + network. Defaults to the first usable IP in the range. + * `startip` - (Optional) Start of the IP block that will be available on the network. Defaults to the second available IP in the range. * `endip` - (Optional) End of the IP block that will be available on the network. Defaults to the last available IP in the range. -* `gateway` - (Optional) Gateway that will be provided to the instances in this - network. Defaults to the first usable IP in the range. - * `network_offering` - (Required) The name or ID of the network offering to use for this network. From 6e402102272d64b428fa7cd61f61aafa93f665e8 Mon Sep 17 00:00:00 2001 From: Max Englander Date: Wed, 6 Jul 2016 08:29:47 -0400 Subject: [PATCH 0158/1238] #2087 gracefully handle non-presence of service on remote consul agent --- builtin/providers/consul/resource_consul_agent_service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/consul/resource_consul_agent_service.go b/builtin/providers/consul/resource_consul_agent_service.go index 9ede63bf3..6636060a8 100644 --- a/builtin/providers/consul/resource_consul_agent_service.go +++ b/builtin/providers/consul/resource_consul_agent_service.go @@ -106,7 +106,7 @@ func resourceConsulAgentServiceRead(d *schema.ResourceData, meta interface{}) er if services, err := agent.Services(); err != nil { return fmt.Errorf("Failed to get services from Consul agent: %v", err) } else if service, ok := services[name]; !ok { - return fmt.Errorf("Failed to get service '%s' from Consul agent", name) + d.Set("id", "") } else { d.Set("address", service.Address) d.Set("id", service.ID) From 1a4bd24e1ad83d50748171d75bd0a4c9c924ce84 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 6 Jul 2016 08:56:29 -0500 Subject: [PATCH 0159/1238] terraform: add test helper for inline config loading In scenarios with a lot of small configs, it's tedious to fan out actual dir trees in a test-fixtures dir. It also spreads out the context of the test - requiring the reader fetch a bunch of scattered 3 line files in order to understand what is being tested. Our config loading code still only reads from disk, but in the `helper/resource` acc test framework we work around this by writing inline config to temp files and loading it from there. This helper is based on that strategy. Eventually it'd be great to be able to build up a `module.Tree` from config directly, but this gets us the functionality today. 
Example Usage: testModuleInline(t, map[string]string{ "top.tf": ` module "middle" { source = "./middle" } `, "middle/mid.tf": ` module "bottom" { source = "./bottom" amap { foo = "bar" } } `, "middle/bottom/bot.tf": ` variable "amap" { type = "map" } `, }), --- terraform/terraform_test.go | 49 +++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 33412a8ce..c608d3d73 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "io" "io/ioutil" "os" "path/filepath" @@ -60,6 +61,54 @@ func testModule(t *testing.T, name string) *module.Tree { return mod } +// testModuleInline takes a map of path -> config strings and yields a config +// structure with those files loaded from disk +func testModuleInline(t *testing.T, config map[string]string) *module.Tree { + cfgPath, err := ioutil.TempDir("", "tf-test") + if err != nil { + t.Errorf("Error creating temporary directory for config: %s", err) + } + defer os.RemoveAll(cfgPath) + + for path, configStr := range config { + dir := filepath.Dir(path) + if dir != "." { + err := os.MkdirAll(filepath.Join(cfgPath, dir), os.FileMode(0777)) + if err != nil { + t.Fatalf("Error creating subdir: %s", err) + } + } + // Write the configuration + cfgF, err := os.Create(filepath.Join(cfgPath, path)) + if err != nil { + t.Fatalf("Error creating temporary file for config: %s", err) + } + + _, err = io.Copy(cfgF, strings.NewReader(configStr)) + cfgF.Close() + if err != nil { + t.Fatalf("Error creating temporary file for config: %s", err) + } + } + + // Parse the configuration + mod, err := module.NewTreeModule("", cfgPath) + if err != nil { + t.Fatalf("Error loading configuration: %s", err) + } + + // Load the modules + modStorage := &getter.FolderStorage{ + StorageDir: filepath.Join(cfgPath, ".tfmodules"), + } + err = mod.Load(modStorage, module.GetModeGet) + if err != nil { + t.Errorf("Error downloading modules: %s", err) + } + + return mod +} + func testStringMatch(t *testing.T, s fmt.Stringer, expected string) { actual := strings.TrimSpace(s.String()) expected = strings.TrimSpace(expected) From 1d488bdbd26cd18ad911dd485735fa597c5f6ca7 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 6 Jul 2016 09:20:39 -0500 Subject: [PATCH 0160/1238] provider/aws: Do an extra lookup for the VPC Endpoint Prefix, to test Prefix --- .../resource_aws_security_group_rule_test.go | 56 ++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index 5ed0f680e..4dbaec474 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -418,6 +418,42 @@ func TestAccAWSSecurityGroupRule_Race(t *testing.T) { func TestAccAWSSecurityGroupRule_PrefixListEgress(t *testing.T) { var group ec2.SecurityGroup + var endpoint ec2.VpcEndpoint + var p ec2.IpPermission + + // This function creates the expected IPPermission with the prefix list ID from + // the VPC Endpoint created in the test + setupSG := func(*terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + prefixListInput := &ec2.DescribePrefixListsInput{ + Filters: []*ec2.Filter{ + {Name: aws.String("prefix-list-name"), Values: []*string{endpoint.ServiceName}}, + }, + } + + log.Printf("[DEBUG] Reading VPC Endpoint prefix list: 
%s", prefixListInput) + prefixListsOutput, err := conn.DescribePrefixLists(prefixListInput) + + if err != nil { + _, ok := err.(awserr.Error) + if !ok { + return fmt.Errorf("Error reading VPC Endpoint prefix list: %s", err.Error()) + } + } + + if len(prefixListsOutput.PrefixLists) != 1 { + return fmt.Errorf("There are multiple prefix lists associated with the service name '%s'. Unexpected", prefixListsOutput) + } + + p = ec2.IpPermission{ + IpProtocol: aws.String("-1"), + PrefixListIds: []*ec2.PrefixListId{ + &ec2.PrefixListId{PrefixListId: prefixListsOutput.PrefixLists[0].PrefixListId}, + }, + } + + return nil + } resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -428,7 +464,11 @@ func TestAccAWSSecurityGroupRule_PrefixListEgress(t *testing.T) { Config: testAccAWSSecurityGroupRulePrefixListEgressConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSSecurityGroupRuleExists("aws_security_group.egress", &group), - testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, nil, "egress"), + // lookup info on the VPC Endpoint created, to populate the expected + // IP Perm + testAccCheckVpcEndpointExists("aws_vpc_endpoint.s3-us-west-2", &endpoint), + setupSG, + testAccCheckAWSSecurityGroupRuleAttributes("aws_security_group_rule.egress_1", &group, &p, "egress"), ), }, }, @@ -568,6 +608,20 @@ func testAccCheckAWSSecurityGroupRuleAttributes(n string, group *ec2.SecurityGro if remaining > 0 { continue } + + remaining = len(p.PrefixListIds) + for _, pip := range p.PrefixListIds { + for _, rpip := range r.PrefixListIds { + if *pip.PrefixListId == *rpip.PrefixListId { + remaining-- + } + } + } + + if remaining > 0 { + continue + } + matchingRule = r } From 559f14c3fa2d943d2d0f61ee24e96f899e57c376 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 6 Jul 2016 09:11:46 -0500 Subject: [PATCH 0161/1238] terraform: allow literal maps to be passed to modules Passing a literal map to a module looks like this in HCL: module "foo" { source = "./foo" somemap { somekey = "somevalue" } } The HCL parser always wraps an extra list around the map, so we need to remove that extra list wrapper when the parameter is indeed of type "map". 
Fixes #7140 --- terraform/context_plan_test.go | 39 +++++ terraform/eval_variable.go | 61 ++++++-- terraform/eval_variable_test.go | 142 ++++++++++++++++++ terraform/graph_config_node_variable.go | 6 + .../plan-module-map-literal/child/main.tf | 12 ++ .../plan-module-map-literal/main.tf | 7 + 6 files changed, 255 insertions(+), 12 deletions(-) create mode 100644 terraform/eval_variable_test.go create mode 100644 terraform/test-fixtures/plan-module-map-literal/child/main.tf create mode 100644 terraform/test-fixtures/plan-module-map-literal/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 18cd7efc6..9ad2d1f19 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2286,3 +2286,42 @@ func TestContext2Plan_ignoreChanges(t *testing.T) { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } + +func TestContext2Plan_moduleMapLiteral(t *testing.T) { + m := testModule(t, "plan-module-map-literal") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { + // Here we verify that both the populated and empty map literals made it + // through to the resource attributes + val, _ := c.Get("tags") + m, ok := val.(map[string]interface{}) + if !ok { + t.Fatalf("Tags attr not map: %#v", val) + } + if m["foo"] != "bar" { + t.Fatalf("Bad value in tags attr: %#v", m) + } + { + val, _ := c.Get("meta") + m, ok := val.(map[string]interface{}) + if !ok { + t.Fatalf("Meta attr not map: %#v", val) + } + if len(m) != 0 { + t.Fatalf("Meta attr not empty: %#v", val) + } + } + return nil, nil + } + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } +} diff --git a/terraform/eval_variable.go b/terraform/eval_variable.go index 22d93da7f..ce6064706 100644 --- a/terraform/eval_variable.go +++ b/terraform/eval_variable.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "log" "reflect" "strings" @@ -21,10 +22,6 @@ import ( // declared // - the path to the module (so we know which part of the tree to // compare the values against). -// -// Currently since the type system is simple, we currently do not make -// use of the values since it is only valid to pass string values. The -// structure is in place for extension of the type system, however. type EvalTypeCheckVariable struct { Variables map[string]interface{} ModulePath []string @@ -50,10 +47,6 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { } for name, declaredType := range prototypes { - // This is only necessary when we _actually_ check. It is left as a reminder - // that at the current time we are dealing with a type system consisting only - // of strings and maps - where the only valid inter-module variable type is - // string. proposedValue, ok := n.Variables[name] if !ok { // This means the default value should be used as no overriding value @@ -67,8 +60,6 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { switch declaredType { case config.VariableTypeString: - // This will need actual verification once we aren't dealing with - // a map[string]string but this is sufficient for now. 
switch proposedValue.(type) { case string: continue @@ -93,8 +84,6 @@ func (n *EvalTypeCheckVariable) Eval(ctx EvalContext) (interface{}, error) { name, modulePathDescription, declaredType.Printable(), hclTypeName(proposedValue)) } default: - // This will need the actual type substituting when we have more than - // just strings and maps. return nil, fmt.Errorf("variable %s%s should be type %s, got type string", name, modulePathDescription, declaredType.Printable()) } @@ -163,6 +152,54 @@ func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } +// EvalCoerceMapVariable is an EvalNode implementation that recognizes a +// specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a +// bare map literal is indistinguishable from a list of maps w/ one element. +// +// We take all the same inputs as EvalTypeCheckVariable above, since we need +// both the target type and the proposed value in order to properly coerce. +type EvalCoerceMapVariable struct { + Variables map[string]interface{} + ModulePath []string + ModuleTree *module.Tree +} + +// Eval implements the EvalNode interface. See EvalCoerceMapVariable for +// details. +func (n *EvalCoerceMapVariable) Eval(ctx EvalContext) (interface{}, error) { + currentTree := n.ModuleTree + for _, pathComponent := range n.ModulePath[1:] { + currentTree = currentTree.Children()[pathComponent] + } + targetConfig := currentTree.Config() + + prototypes := make(map[string]config.VariableType) + for _, variable := range targetConfig.Variables { + prototypes[variable.Name] = variable.Type() + } + + for name, declaredType := range prototypes { + if declaredType != config.VariableTypeMap { + continue + } + + proposedValue, ok := n.Variables[name] + if !ok { + continue + } + + if list, ok := proposedValue.([]interface{}); ok && len(list) == 1 { + if m, ok := list[0].(map[string]interface{}); ok { + log.Printf("[DEBUG] EvalCoerceMapVariable: "+ + "Coercing single element list into map: %#v", m) + n.Variables[name] = m + } + } + } + + return nil, nil +} + // hclTypeName returns the name of the type that would represent this value in // a config file, or falls back to the Go type name if there's no corresponding // HCL type. This is used for formatted output, not for comparing types. 
diff --git a/terraform/eval_variable_test.go b/terraform/eval_variable_test.go new file mode 100644 index 000000000..05fc2b850 --- /dev/null +++ b/terraform/eval_variable_test.go @@ -0,0 +1,142 @@ +package terraform + +import ( + "reflect" + "testing" +) + +func TestCoerceMapVariable(t *testing.T) { + cases := map[string]struct { + Input *EvalCoerceMapVariable + ExpectVars map[string]interface{} + }{ + "a valid map is untouched": { + Input: &EvalCoerceMapVariable{ + Variables: map[string]interface{}{ + "amap": map[string]interface{}{"foo": "bar"}, + }, + ModulePath: []string{"root"}, + ModuleTree: testModuleInline(t, map[string]string{ + "main.tf": ` + variable "amap" { + type = "map" + } + `, + }), + }, + ExpectVars: map[string]interface{}{ + "amap": map[string]interface{}{"foo": "bar"}, + }, + }, + "a list w/ a single map element is coerced": { + Input: &EvalCoerceMapVariable{ + Variables: map[string]interface{}{ + "amap": []interface{}{ + map[string]interface{}{"foo": "bar"}, + }, + }, + ModulePath: []string{"root"}, + ModuleTree: testModuleInline(t, map[string]string{ + "main.tf": ` + variable "amap" { + type = "map" + } + `, + }), + }, + ExpectVars: map[string]interface{}{ + "amap": map[string]interface{}{"foo": "bar"}, + }, + }, + "a list w/ more than one map element is untouched": { + Input: &EvalCoerceMapVariable{ + Variables: map[string]interface{}{ + "amap": []interface{}{ + map[string]interface{}{"foo": "bar"}, + map[string]interface{}{"baz": "qux"}, + }, + }, + ModulePath: []string{"root"}, + ModuleTree: testModuleInline(t, map[string]string{ + "main.tf": ` + variable "amap" { + type = "map" + } + `, + }), + }, + ExpectVars: map[string]interface{}{ + "amap": []interface{}{ + map[string]interface{}{"foo": "bar"}, + map[string]interface{}{"baz": "qux"}, + }, + }, + }, + "list coercion also works in a module": { + Input: &EvalCoerceMapVariable{ + Variables: map[string]interface{}{ + "amap": []interface{}{ + map[string]interface{}{"foo": "bar"}, + }, + }, + ModulePath: []string{"root", "middle", "bottom"}, + ModuleTree: testModuleInline(t, map[string]string{ + "top.tf": ` + module "middle" { + source = "./middle" + } + `, + "middle/mid.tf": ` + module "bottom" { + source = "./bottom" + amap { + foo = "bar" + } + } + `, + "middle/bottom/bot.tf": ` + variable "amap" { + type = "map" + } + `, + }), + }, + ExpectVars: map[string]interface{}{ + "amap": map[string]interface{}{"foo": "bar"}, + }, + }, + "coercion only occurs when target var is a map": { + Input: &EvalCoerceMapVariable{ + Variables: map[string]interface{}{ + "alist": []interface{}{ + map[string]interface{}{"foo": "bar"}, + }, + }, + ModulePath: []string{"root"}, + ModuleTree: testModuleInline(t, map[string]string{ + "main.tf": ` + variable "alist" { + type = "list" + } + `, + }), + }, + ExpectVars: map[string]interface{}{ + "alist": []interface{}{ + map[string]interface{}{"foo": "bar"}, + }, + }, + }, + } + + for tn, tc := range cases { + _, err := tc.Input.Eval(&MockEvalContext{}) + if err != nil { + t.Fatalf("%s: Unexpected err: %s", tn, err) + } + if !reflect.DeepEqual(tc.Input.Variables, tc.ExpectVars) { + t.Fatalf("%s: Expected variables:\n\n%#v\n\nGot:\n\n%#v", + tn, tc.ExpectVars, tc.Input.Variables) + } + } +} diff --git a/terraform/graph_config_node_variable.go b/terraform/graph_config_node_variable.go index 0a86c4881..35222a547 100644 --- a/terraform/graph_config_node_variable.go +++ b/terraform/graph_config_node_variable.go @@ -176,6 +176,12 @@ func (n *GraphNodeConfigVariable) EvalTree() EvalNode { 
VariableValues: variables, }, + &EvalCoerceMapVariable{ + Variables: variables, + ModulePath: n.ModulePath, + ModuleTree: n.ModuleTree, + }, + &EvalTypeCheckVariable{ Variables: variables, ModulePath: n.ModulePath, diff --git a/terraform/test-fixtures/plan-module-map-literal/child/main.tf b/terraform/test-fixtures/plan-module-map-literal/child/main.tf new file mode 100644 index 000000000..5dcb7bec4 --- /dev/null +++ b/terraform/test-fixtures/plan-module-map-literal/child/main.tf @@ -0,0 +1,12 @@ +variable "amap" { + type = "map" +} + +variable "othermap" { + type = "map" +} + +resource "aws_instance" "foo" { + tags = "${var.amap}" + meta = "${var.othermap}" +} diff --git a/terraform/test-fixtures/plan-module-map-literal/main.tf b/terraform/test-fixtures/plan-module-map-literal/main.tf new file mode 100644 index 000000000..f9a3f201a --- /dev/null +++ b/terraform/test-fixtures/plan-module-map-literal/main.tf @@ -0,0 +1,7 @@ +module "child" { + source = "./child" + amap { + foo = "bar" + } + othermap {} +} From 21e2173e0a32078d4f3c7dbed1f2a868da5c99b2 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 6 Jul 2016 11:22:41 -0400 Subject: [PATCH 0162/1238] Fix nested module "unknown variable" during dest (#7496) * Fix nested module "unknown variable" during dstry During a destroy with nested modules, accessing a variable between them causes an "unknown variable accessed" during destroy. --- terraform/context_apply_test.go | 80 +++++++++++++++++++ terraform/graph_config_node_variable.go | 7 +- .../middle/bottom/bottom.tf | 1 + .../middle/middle.tf | 8 ++ .../top.tf | 4 + 5 files changed, 98 insertions(+), 2 deletions(-) create mode 100644 terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf create mode 100644 terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf create mode 100644 terraform/test-fixtures/apply-destroy-nested-module-with-attrs/top.tf diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index fa0817821..a5f425a40 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -4704,3 +4704,83 @@ aws_instance.foo: t.Fatalf("bad: \n%s", actual) } } + +// https://github.com/hashicorp/terraform/issues/7378 +func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testing.T) { + m := testModule(t, "apply-destroy-nested-module-with-attrs") + p := testProvider("null") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + + var state *State + var err error + { + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "null": testProviderFuncFixed(p), + }, + }) + + // First plan and apply a create operation + if _, err := ctx.Plan(); err != nil { + t.Fatalf("plan err: %s", err) + } + + state, err = ctx.Apply() + if err != nil { + t.Fatalf("apply err: %s", err) + } + } + + { + ctx := testContext2(t, &ContextOpts{ + Destroy: true, + Module: m, + State: state, + Providers: map[string]ResourceProviderFactory{ + "null": testProviderFuncFixed(p), + }, + }) + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("destroy plan err: %s", err) + } + + var buf bytes.Buffer + if err := WritePlan(plan, &buf); err != nil { + t.Fatalf("plan write err: %s", err) + } + + planFromFile, err := ReadPlan(&buf) + if err != nil { + t.Fatalf("plan read err: %s", err) + } + + ctx, err = planFromFile.Context(&ContextOpts{ + Providers: map[string]ResourceProviderFactory{ + "null": testProviderFuncFixed(p), + }, + }) + if err != nil { + 
t.Fatalf("err: %s", err) + } + + state, err = ctx.Apply() + if err != nil { + t.Fatalf("destroy apply err: %s", err) + } + } + + //Test that things were destroyed + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(` + +module.middle: + + `) + if actual != expected { + t.Fatalf("expected: \n%s\n\nbad: \n%s", expected, actual) + } +} diff --git a/terraform/graph_config_node_variable.go b/terraform/graph_config_node_variable.go index 0a86c4881..8565fd69a 100644 --- a/terraform/graph_config_node_variable.go +++ b/terraform/graph_config_node_variable.go @@ -130,6 +130,7 @@ func (n *GraphNodeConfigVariable) hasDestroyEdgeInPath(opts *NoopOpts, vertex da if vertex == nil { vertex = opts.Vertex } + log.Printf("[DEBUG] hasDestroyEdgeInPath: Looking for destroy edge: %s - %T", dag.VertexName(vertex), vertex) for _, v := range opts.Graph.UpEdges(vertex).List() { if len(opts.Graph.UpEdges(v).List()) > 1 { @@ -137,10 +138,12 @@ func (n *GraphNodeConfigVariable) hasDestroyEdgeInPath(opts *NoopOpts, vertex da return true } } + // Here we borrow the implementation of DestroyEdgeInclude, whose logic - // and semantics are exactly what we want here. + // and semantics are exactly what we want here. We add a check for the + // the root node, since we have to always depend on its existance. if cv, ok := vertex.(*GraphNodeConfigVariableFlat); ok { - if cv.DestroyEdgeInclude(v) { + if dag.VertexName(v) == rootNodeName || cv.DestroyEdgeInclude(v) { return true } } diff --git a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf new file mode 100644 index 000000000..a9ce7fcc8 --- /dev/null +++ b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf @@ -0,0 +1 @@ +variable "bottom_param" {} diff --git a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf new file mode 100644 index 000000000..0fde5830b --- /dev/null +++ b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf @@ -0,0 +1,8 @@ +variable "param" {} + +resource "null_resource" "n" {} + +module "bottom" { + source = "./bottom" + bottom_param = "${var.param}" +} diff --git a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/top.tf b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/top.tf new file mode 100644 index 000000000..1b631f4d5 --- /dev/null +++ b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/top.tf @@ -0,0 +1,4 @@ +module "middle" { + source = "./middle" + param = "foo" +} From 02a16b37bc4b4e127e54565721d7de33837e03b4 Mon Sep 17 00:00:00 2001 From: Brian Schwind Date: Wed, 6 Jul 2016 16:00:16 -0400 Subject: [PATCH 0163/1238] Update S3 notification documentation When adding multiple notifications from one S3 bucket to one SQS queue, it wasn't immediately intuitive how to do this. At first I created two `aws_s3_bucket_notification` configs and it seemed to work fine, however the config for one event will overwrite the other. In order to have multiple events, you can defined the `queue` key twice, or use an array if you're working with the JSON syntax. I tried to make this more clear in the documentation. 
--- .../r/s3_bucket_notification.html.markdown | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown index fac3a8d49..3f6564d17 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown @@ -135,6 +135,72 @@ resource "aws_s3_bucket_notification" "bucket_notification" { } ``` +### Add multiple notification configurations to SQS Queue + +``` +resource "aws_sqs_queue" "queue" { + name = "s3-event-notification-queue" + policy = < Date: Wed, 6 Jul 2016 19:17:35 -0700 Subject: [PATCH 0164/1238] Support `make test` if TF itself is vendored --- Makefile | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 1603ce214..358a03af5 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -TEST?=$$(go list ./... | grep -v '/vendor/' | grep -v '/builtin/bins/') +TEST?=$$(go list ./... | grep -v '/terraform/vendor/' | grep -v '/builtin/bins/') VETARGS?=-all GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor) @@ -29,7 +29,8 @@ core-dev: generate # Shorthand for quickly testing the core of Terraform (i.e. "not providers") core-test: generate - @echo "Testing core packages..." && go test -tags 'core' $(TESTARGS) $(shell go list ./... | grep -v -E 'builtin|vendor') + @echo "Testing core packages..." && \ + go test -tags 'core' $(TESTARGS) $(shell go list ./... | grep -v -E 'builtin|terraform/vendor') # Shorthand for building and installing just one plugin for local testing. # Run as (for example): make plugin-dev PLUGIN=provider-aws @@ -79,7 +80,7 @@ generate: @which stringer ; if [ $$? -ne 0 ]; then \ go get -u golang.org/x/tools/cmd/stringer; \ fi - go generate $$(go list ./... | grep -v /vendor/) + go generate $$(go list ./... | grep -v /terraform/vendor/) @go fmt command/internal_plugin_list.go > /dev/null fmt: From 0326d543666876e6011ce747cc10ea576b874c68 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 13:39:20 +0100 Subject: [PATCH 0165/1238] provider/aws: `directory_service_directory` documentation Fixes #4648 The AWS Console shows that 2 subnets are required - not a minimum --- .../providers/aws/r/directory_service_directory.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown index 83f07649b..deb6fcb00 100644 --- a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown +++ b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown @@ -57,14 +57,14 @@ The following arguments are supported: **vpc\_settings** supports the following: -* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (min. 2 subnets in 2 different AZs). +* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). * `vpc_id` - (Required) The identifier of the VPC that the directory is in. **connect\_settings** supports the following: * `customer_username` - (Required) The username corresponding to the password provided. * `customer_dns_ips` - (Required) The DNS IP addresses of the domain to connect to. -* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (min. 
2 subnets in 2 different AZs). +* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs). * `vpc_id` - (Required) The identifier of the VPC that the directory is in. ## Attributes Reference From 53681390d4ccd51aad97b0d6894597d13930920c Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 13:45:49 +0100 Subject: [PATCH 0166/1238] provider/aws: Remove `aws_codedeploy_deployment_group` from state on 404 Fixes #4802 The manual removal will now force Terraform to remove the resource from state and then report it needs recreated --- ...esource_aws_codedeploy_deployment_group.go | 6 +++ ...ce_aws_codedeploy_deployment_group_test.go | 52 +++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go index f1bfa49ba..72e019311 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group.go @@ -238,6 +238,12 @@ func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta inter DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)), }) if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "DeploymentGroupDoesNotExistException" { + log.Printf("[INFO] CodeDeployment DeploymentGroup %s not found", d.Get("deployment_group_name").(string)) + d.SetId("") + return nil + } + return err } diff --git a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go index 3ce37f1a4..4e8955b22 100644 --- a/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go +++ b/builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go @@ -6,6 +6,7 @@ import ( "regexp" "sort" "testing" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -107,6 +108,27 @@ func TestAccAWSCodeDeployDeploymentGroup_onPremiseTag(t *testing.T) { }) } +func TestAccAWSCodeDeployDeploymentGroup_disappears(t *testing.T) { + var group codedeploy.DeploymentGroupInfo + rName := acctest.RandString(5) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCodeDeployDeploymentGroup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo", &group), + testAccAWSCodeDeployDeploymentGroupDisappears(&group), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + func TestAccAWSCodeDeployDeploymentGroup_triggerConfiguration_basic(t *testing.T) { var group codedeploy.DeploymentGroupInfo @@ -407,6 +429,36 @@ func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error { return nil } +func testAccAWSCodeDeployDeploymentGroupDisappears(group *codedeploy.DeploymentGroupInfo) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).codedeployconn + opts := &codedeploy.DeleteDeploymentGroupInput{ + ApplicationName: group.ApplicationName, + DeploymentGroupName: group.DeploymentGroupName, + } + if _, err := conn.DeleteDeploymentGroup(opts); err != nil { + return err + } + return resource.Retry(40*time.Minute, func() *resource.RetryError { + opts := 
&codedeploy.GetDeploymentGroupInput{ + ApplicationName: group.ApplicationName, + DeploymentGroupName: group.DeploymentGroupName, + } + _, err := conn.GetDeploymentGroup(opts) + if err != nil { + codedeploy, ok := err.(awserr.Error) + if ok && codedeploy.Code() == "DeploymentGroupDoesNotExistException" { + return nil + } + return resource.NonRetryableError( + fmt.Errorf("Error retrieving CodeDeploy Deployment Group: %s", err)) + } + return resource.RetryableError(fmt.Errorf( + "Waiting for CodeDeploy Deployment Group: %v", group.DeploymentGroupName)) + }) + } +} + func testAccCheckAWSCodeDeployDeploymentGroupExists(name string, group *codedeploy.DeploymentGroupInfo) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] From 6f9b3aa4fd9ec9a12cb5d40c02911beff7ce79cb Mon Sep 17 00:00:00 2001 From: Mike LoSapio Date: Thu, 7 Jul 2016 09:51:15 -0400 Subject: [PATCH 0167/1238] Clarify join produces a string --- website/source/docs/configuration/interpolation.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 47b21b719..c765e2d60 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -152,7 +152,7 @@ The supported built-in functions are: * `index(list, elem)` - Finds the index of a given element in a list. Example: `index(aws_instance.foo.*.tags.Name, "foo-test")` - * `join(delim, list)` - Joins the list with the delimiter. A list is + * `join(delim, list)` - Joins the list with the delimiter for a resultant string. A list is only possible with splat variables from resources with a count greater than one. Example: `join(",", aws_instance.foo.*.id)` From 80aeabec83c7500a2fcab75bd6774f7b2b7d07f7 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 16:26:35 +0100 Subject: [PATCH 0168/1238] provider/aws: Support Import of `aws_db_instance` MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit JUst needed some rejigging of the skip_final_snapshot work as that isn't returned by the API and skipping it means the destroy fails due to missing final_snapshot_identifier ``` % make testacc TEST=./builtin/providers/aws % TESTARGS='-run=TestAccAWSDBInstance_' ✹ ✭ ==> Checking that code complies with gofmt requirements... /Users/stacko/Code/go/bin/stringer go generate $(go list ./... | grep -v /vendor/) 2016/07/07 15:28:31 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDBInstance_ -timeout 120m === RUN TestAccAWSDBInstance_importBasic --- PASS: TestAccAWSDBInstance_importBasic (588.70s) === RUN TestAccAWSDBInstance_basic --- PASS: TestAccAWSDBInstance_basic (595.71s) === RUN TestAccAWSDBInstance_kmsKey --- PASS: TestAccAWSDBInstance_kmsKey (726.46s) === RUN TestAccAWSDBInstance_optionGroup --- PASS: TestAccAWSDBInstance_optionGroup (681.78s) === RUN TestAccAWSDBInstance_iops_update --- PASS: TestAccAWSDBInstance_iops_update (590.81s) ``` Please note that I cannot run the enhanced monitoring test in my environment as I have already got it attached to an IAM role. 
Running that test gives me this result: ``` ``` --- .../aws/import_aws_db_instance_test.go | 30 +++++++++++++++++++ .../providers/aws/resource_aws_db_instance.go | 13 ++++++-- 2 files changed, 40 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/aws/import_aws_db_instance_test.go diff --git a/builtin/providers/aws/import_aws_db_instance_test.go b/builtin/providers/aws/import_aws_db_instance_test.go new file mode 100644 index 000000000..8079d117b --- /dev/null +++ b/builtin/providers/aws/import_aws_db_instance_test.go @@ -0,0 +1,30 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSDBInstance_importBasic(t *testing.T) { + resourceName := "aws_db_instance.bar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBInstanceConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "password", "skip_final_snapshot"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 7732689e1..4b6c3985e 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -22,6 +22,9 @@ func resourceAwsDbInstance() *schema.Resource { Read: resourceAwsDbInstanceRead, Update: resourceAwsDbInstanceUpdate, Delete: resourceAwsDbInstanceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -633,6 +636,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { } d.Set("name", v.DBName) + d.Set("identifier", v.DBInstanceIdentifier) d.Set("username", v.MasterUsername) d.Set("engine", v.Engine) d.Set("engine_version", v.EngineVersion) @@ -753,10 +757,13 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) + skipFinalSnapshot, exists := d.GetOk("skip_final_snapshot") + if !exists { + skipFinalSnapshot = true + } + opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot.(bool)) - if !skipFinalSnapshot { + if skipFinalSnapshot == false { if name, present := d.GetOk("final_snapshot_identifier"); present { opts.FinalDBSnapshotIdentifier = aws.String(name.(string)) } else { From 6f122c3f05b565df64f61f89041c5c6dec100db9 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 16:45:56 +0100 Subject: [PATCH 0169/1238] provider/aws: Support Import of `aws_db_option_group` ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSDBOptionGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDBOptionGroup_ -timeout 120m === RUN TestAccAWSDBOptionGroup_importBasic --- PASS: TestAccAWSDBOptionGroup_importBasic (22.98s) === RUN TestAccAWSDBOptionGroup_basic --- PASS: TestAccAWSDBOptionGroup_basic (22.54s) === RUN TestAccAWSDBOptionGroup_OptionSettings --- PASS: TestAccAWSDBOptionGroup_OptionSettings (38.62s) === RUN TestAccAWSDBOptionGroup_sqlServerOptionsUpdate --- PASS: TestAccAWSDBOptionGroup_sqlServerOptionsUpdate (37.64s) === RUN TestAccAWSDBOptionGroup_multipleOptions --- PASS: TestAccAWSDBOptionGroup_multipleOptions (24.32s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 146.123s ``` --- .../aws/import_aws_db_option_group_test.go | 31 +++++++++++++++++++ .../aws/resource_aws_db_option_group.go | 8 +++-- 2 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 builtin/providers/aws/import_aws_db_option_group_test.go diff --git a/builtin/providers/aws/import_aws_db_option_group_test.go b/builtin/providers/aws/import_aws_db_option_group_test.go new file mode 100644 index 000000000..3025ff9e8 --- /dev/null +++ b/builtin/providers/aws/import_aws_db_option_group_test.go @@ -0,0 +1,31 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSDBOptionGroup_importBasic(t *testing.T) { + resourceName := "aws_db_option_group.bar" + rName := fmt.Sprintf("option-group-test-terraform-%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBOptionGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBOptionGroupBasicConfig(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_db_option_group.go b/builtin/providers/aws/resource_aws_db_option_group.go index 50f969220..6dc881406 100644 --- a/builtin/providers/aws/resource_aws_db_option_group.go +++ b/builtin/providers/aws/resource_aws_db_option_group.go @@ -19,6 +19,9 @@ func resourceAwsDbOptionGroup() *schema.Resource { Read: resourceAwsDbOptionGroupRead, Update: resourceAwsDbOptionGroupUpdate, Delete: resourceAwsDbOptionGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "arn": &schema.Schema{ @@ -125,7 +128,7 @@ func resourceAwsDbOptionGroupCreate(d *schema.ResourceData, meta interface{}) er func resourceAwsDbOptionGroupRead(d *schema.ResourceData, meta interface{}) error { rdsconn := meta.(*AWSClient).rdsconn params := &rds.DescribeOptionGroupsInput{ - OptionGroupName: aws.String(d.Get("name").(string)), + OptionGroupName: aws.String(d.Id()), } log.Printf("[DEBUG] Describe DB Option Group: %#v", params) @@ -143,7 +146,7 @@ func resourceAwsDbOptionGroupRead(d *schema.ResourceData, meta interface{}) erro var option *rds.OptionGroup for _, ogl := range options.OptionGroupsList { - if *ogl.OptionGroupName == d.Get("name").(string) { + if *ogl.OptionGroupName == d.Id() { option = ogl break } @@ -153,6 +156,7 @@ func resourceAwsDbOptionGroupRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Unable to find Option Group: %#v", options.OptionGroupsList) } + d.Set("name", option.OptionGroupName) d.Set("major_engine_version", option.MajorEngineVersion) 
d.Set("engine_name", option.EngineName) d.Set("option_group_description", option.OptionGroupDescription) From aa6e23bc4be015e4f78d7d02a8650bcc5382c971 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 17:24:18 +0100 Subject: [PATCH 0170/1238] provider/aws: Support Import of `aws_ses_receipt_rule_set` ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSSESReceiptRuleSet_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSSESReceiptRuleSet_ -timeout 120m === RUN TestAccAWSSESReceiptRuleSet_importBasic --- PASS: TestAccAWSSESReceiptRuleSet_importBasic (18.60s) === RUN TestAccAWSSESReceiptRuleSet_basic --- PASS: TestAccAWSSESReceiptRuleSet_basic (26.92s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 45.550s ``` --- .../import_aws_ses_receipt_rule_set_test.go | 28 +++++++++++++++++++ .../aws/resource_aws_ses_receipt_rule_set.go | 6 ++++ 2 files changed, 34 insertions(+) create mode 100644 builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go diff --git a/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go b/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go new file mode 100644 index 000000000..c5294bcb4 --- /dev/null +++ b/builtin/providers/aws/import_aws_ses_receipt_rule_set_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSSESReceiptRuleSet_importBasic(t *testing.T) { + resourceName := "aws_ses_receipt_rule_set.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptRuleSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptRuleSetConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go b/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go index 547835b37..dfaf98cf8 100644 --- a/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go +++ b/builtin/providers/aws/resource_aws_ses_receipt_rule_set.go @@ -14,6 +14,9 @@ func resourceAwsSesReceiptRuleSet() *schema.Resource { Create: resourceAwsSesReceiptRuleSetCreate, Read: resourceAwsSesReceiptRuleSetRead, Delete: resourceAwsSesReceiptRuleSetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "rule_set_name": &schema.Schema{ @@ -50,12 +53,15 @@ func resourceAwsSesReceiptRuleSetRead(d *schema.ResourceData, meta interface{}) if !ruleSetExists { log.Printf("[WARN] SES Receipt Rule Set (%s) not found", d.Id()) d.SetId("") + return nil } if err != nil { return err } + d.Set("rule_set_name", d.Id()) + return nil } From a74bd870e21525fcaca90313a1191bc98bac25e0 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 17:37:56 +0100 Subject: [PATCH 0171/1238] provider/aws: Support Import of `aws_ses_receipt_filter` ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSSESReceiptFilter_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSSESReceiptFilter_ -timeout 120m === RUN TestAccAWSSESReceiptFilter_importBasic --- PASS: TestAccAWSSESReceiptFilter_importBasic (18.18s) === RUN TestAccAWSSESReceiptFilter_basic --- PASS: TestAccAWSSESReceiptFilter_basic (18.42s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 36.633s ``` --- .../aws/import_aws_ses_receipt_filter_test.go | 28 +++++++++++++++++++ .../aws/resource_aws_ses_receipt_filter.go | 4 +++ 2 files changed, 32 insertions(+) create mode 100644 builtin/providers/aws/import_aws_ses_receipt_filter_test.go diff --git a/builtin/providers/aws/import_aws_ses_receipt_filter_test.go b/builtin/providers/aws/import_aws_ses_receipt_filter_test.go new file mode 100644 index 000000000..ecc962b85 --- /dev/null +++ b/builtin/providers/aws/import_aws_ses_receipt_filter_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSSESReceiptFilter_importBasic(t *testing.T) { + resourceName := "aws_ses_receipt_filter.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckSESReceiptFilterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSESReceiptFilterConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_ses_receipt_filter.go b/builtin/providers/aws/resource_aws_ses_receipt_filter.go index 4ea7ccad2..2242d7eca 100644 --- a/builtin/providers/aws/resource_aws_ses_receipt_filter.go +++ b/builtin/providers/aws/resource_aws_ses_receipt_filter.go @@ -14,6 +14,9 @@ func resourceAwsSesReceiptFilter() *schema.Resource { Create: resourceAwsSesReceiptFilterCreate, Read: resourceAwsSesReceiptFilterRead, Delete: resourceAwsSesReceiptFilterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -77,6 +80,7 @@ func resourceAwsSesReceiptFilterRead(d *schema.ResourceData, meta interface{}) e if *element.Name == d.Id() { d.Set("cidr", element.IpFilter.Cidr) d.Set("policy", element.IpFilter.Policy) + d.Set("name", element.Name) found = true } } From 96e90ec1f6f0538214506c1fc26f9d0c2eee02fa Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 7 Jul 2016 12:10:46 -0500 Subject: [PATCH 0172/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71f9251ff..ddf9ae7f0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -100,6 +100,7 @@ IMPROVEMENTS: * provider/aws: Added support for redshift destination to firehose delivery streams [GH-7375] * provider/aws: Allow `aws_redshift_security_group` ingress rules to change [GH-5939] * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` [GH-7181] + * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint [GH-7511] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From a746b28fc1afb0b292ac83824f9a443a59a60ced Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 18:27:13 +0100 Subject: [PATCH 0173/1238] provider/aws: Support Import of 
`aws_redshift_cluster` ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftCluster_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRedshiftCluster_importBasic -timeout 120m === RUN TestAccAWSRedshiftCluster_importBasic --- PASS: TestAccAWSRedshiftCluster_importBasic (623.52s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 623.546s ``` --- .../aws/import_aws_redshift_cluster_test.go | 32 +++++++++++++++++++ .../aws/resource_aws_redshift_cluster.go | 21 ++++++++++-- 2 files changed, 50 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/aws/import_aws_redshift_cluster_test.go diff --git a/builtin/providers/aws/import_aws_redshift_cluster_test.go b/builtin/providers/aws/import_aws_redshift_cluster_test.go new file mode 100644 index 000000000..b6cea1896 --- /dev/null +++ b/builtin/providers/aws/import_aws_redshift_cluster_test.go @@ -0,0 +1,32 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSRedshiftCluster_importBasic(t *testing.T) { + resourceName := "aws_redshift_cluster.default" + config := fmt.Sprintf(testAccAWSRedshiftClusterConfig_basic, acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"master_password", "skip_final_snapshot"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go index 7f056aa00..7f03eb5b9 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster.go @@ -20,6 +20,9 @@ func resourceAwsRedshiftCluster() *schema.Resource { Read: resourceAwsRedshiftClusterRead, Update: resourceAwsRedshiftClusterUpdate, Delete: resourceAwsRedshiftClusterDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "database_name": &schema.Schema{ @@ -334,7 +337,13 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er return nil } + d.Set("master_username", rsc.MasterUsername) + d.Set("node_type", rsc.NodeType) + d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) d.Set("database_name", rsc.DBName) + d.Set("cluster_identifier", rsc.ClusterIdentifier) + d.Set("cluster_version", rsc.ClusterVersion) + d.Set("cluster_subnet_group_name", rsc.ClusterSubnetGroupName) d.Set("availability_zone", rsc.AvailabilityZone) d.Set("encrypted", rsc.Encrypted) @@ -346,6 +355,7 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er if rsc.Endpoint.Port != nil { endpoint = fmt.Sprintf("%s:%d", endpoint, *rsc.Endpoint.Port) } + d.Set("port", rsc.Endpoint.Port) d.Set("endpoint", endpoint) } d.Set("cluster_parameter_group_name", rsc.ClusterParameterGroups[0].ParameterGroupName) @@ -354,6 +364,8 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er } else { d.Set("cluster_type", "single-node") } + d.Set("number_of_nodes", len(rsc.ClusterNodes)) + 
d.Set("publicly_accessible", rsc.PubliclyAccessible) var vpcg []string for _, g := range rsc.VpcSecurityGroups { @@ -545,10 +557,13 @@ func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) ClusterIdentifier: aws.String(d.Id()), } - skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) - deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot) + skipFinalSnapshot, exists := d.GetOk("skip_final_snapshot") + if !exists { + skipFinalSnapshot = true + } + deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot.(bool)) - if !skipFinalSnapshot { + if skipFinalSnapshot == false { if name, present := d.GetOk("final_snapshot_identifier"); present { deleteOpts.FinalClusterSnapshotIdentifier = aws.String(name.(string)) } else { From 922e626b7e6e2c171e27be7e06a718c59d7df311 Mon Sep 17 00:00:00 2001 From: John Bowler Date: Thu, 7 Jul 2016 11:11:28 -0700 Subject: [PATCH 0174/1238] Fix Makefile for consistency --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 358a03af5..2c2a1bc11 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ core-dev: generate # Shorthand for quickly testing the core of Terraform (i.e. "not providers") core-test: generate @echo "Testing core packages..." && \ - go test -tags 'core' $(TESTARGS) $(shell go list ./... | grep -v -E 'builtin|terraform/vendor') + go test -tags 'core' $(TESTARGS) $(shell go list ./... | grep -v -E 'terraform/(builtin|vendor)') # Shorthand for building and installing just one plugin for local testing. # Run as (for example): make plugin-dev PLUGIN=provider-aws From 3622bfddd635b0d36efd6ec43e1250cd1d576620 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 6 Jul 2016 10:48:52 -0400 Subject: [PATCH 0175/1238] Revert #7464 and allow an empty state Revert back to using a nil state. The external usage of the state shoudl always check the Empty() method. --- state/remote/remote_test.go | 76 ++++++++++++++++++++++++++++++++++++- state/remote/state.go | 6 +-- 2 files changed, 76 insertions(+), 6 deletions(-) diff --git a/state/remote/remote_test.go b/state/remote/remote_test.go index 16afccdf1..04f875747 100644 --- a/state/remote/remote_test.go +++ b/state/remote/remote_test.go @@ -2,6 +2,9 @@ package remote import ( "bytes" + "fmt" + "io/ioutil" + "os" "testing" "github.com/hashicorp/terraform/state" @@ -46,8 +49,8 @@ func TestRemoteClient_noPayload(t *testing.T) { s := &State{ Client: nilClient{}, } - if err := s.RefreshState(); err != ErrRemoteStateNotFound { - t.Fatal("expected ErrRemoteStateNotFound, got", err) + if err := s.RefreshState(); err != nil { + t.Fatal("error refreshing empty remote state") } } @@ -59,3 +62,72 @@ func (nilClient) Get() (*Payload, error) { return nil, nil } func (c nilClient) Put([]byte) error { return nil } func (c nilClient) Delete() error { return nil } + +// ensure that remote state can be properly initialized +func TestRemoteClient_stateInit(t *testing.T) { + localStateFile, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatal(err) + } + + // we need to remove the temp files so we recognize there's no local or + // remote state. 
+ localStateFile.Close() + os.Remove(localStateFile.Name()) + //defer os.Remove(localStateFile.Name()) + fmt.Println("LOCAL:", localStateFile.Name()) + + local := &state.LocalState{ + Path: localStateFile.Name(), + } + if err := local.RefreshState(); err != nil { + t.Fatal(err) + } + localState := local.State() + + fmt.Println("localState.Empty():", localState.Empty()) + + remoteStateFile, err := ioutil.TempFile("", "tf") + if err != nil { + t.Fatal(err) + } + remoteStateFile.Close() + os.Remove(remoteStateFile.Name()) + //defer os.Remove(remoteStateFile.Name() + fmt.Println("LOCAL:", localStateFile.Name()) + fmt.Println("REMOTE:", remoteStateFile.Name()) + + remoteClient := &FileClient{ + Path: remoteStateFile.Name(), + } + + durable := &State{ + Client: remoteClient, + } + + cache := &state.CacheState{ + Cache: local, + Durable: durable, + } + + if err := cache.RefreshState(); err != nil { + t.Fatal(err) + } + + switch cache.RefreshResult() { + + // we should be "refreshing" the remote state to initialize it + case state.CacheRefreshLocalNewer: + // Write our local state out to the durable storage to start. + if err := cache.WriteState(localState); err != nil { + t.Fatal("Error preparing remote state:", err) + } + if err := cache.PersistState(); err != nil { + t.Fatal("Error preparing remote state:", err) + } + default: + + t.Fatal("unexpected refresh result:", cache.RefreshResult()) + } + +} diff --git a/state/remote/state.go b/state/remote/state.go index 5f45129d6..18427f341 100644 --- a/state/remote/state.go +++ b/state/remote/state.go @@ -2,13 +2,10 @@ package remote import ( "bytes" - "errors" "github.com/hashicorp/terraform/terraform" ) -var ErrRemoteStateNotFound = errors.New("no remote state found") - // State implements the State interfaces in the state package to handle // reading and writing the remote state. This State on its own does no // local caching so every persist will go to the remote storage and local @@ -37,8 +34,9 @@ func (s *State) RefreshState() error { return err } + // no remote state is OK if payload == nil { - return ErrRemoteStateNotFound + return nil } state, err := terraform.ReadState(bytes.NewReader(payload.Data)) From 2c27dd41bf03e6f8b75e40cfc002be441d1010fb Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 7 Jul 2016 15:37:57 -0400 Subject: [PATCH 0176/1238] Fix panic when there is no remote state - Check for an empty state. If nothing is referenced from that state in the config, there's nothing to do here. 
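For context, a minimal sketch (hypothetical names, not taken from this patch) of the kind of configuration that reaches this code path: a `terraform_remote_state` data source whose referenced state has not been written yet.

```
data "terraform_remote_state" "network" {
  backend = "s3"

  config {
    bucket = "example-state-bucket"
    key    = "network/terraform.tfstate"
    region = "us-east-1"
  }
}
```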
--- builtin/providers/terraform/data_source_state.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/builtin/providers/terraform/data_source_state.go b/builtin/providers/terraform/data_source_state.go index d8c97ebd5..c87da01fb 100644 --- a/builtin/providers/terraform/data_source_state.go +++ b/builtin/providers/terraform/data_source_state.go @@ -55,7 +55,14 @@ func dataSourceRemoteStateRead(d *schema.ResourceData, meta interface{}) error { d.SetId(time.Now().UTC().String()) outputMap := make(map[string]interface{}) - for key, val := range state.State().RootModule().Outputs { + + remoteState := state.State() + if remoteState.Empty() { + log.Println("[DEBUG] empty remote state") + return nil + } + + for key, val := range remoteState.RootModule().Outputs { outputMap[key] = val.Value } From 2943a1c978137a75edbcbda2c9c1121c5219bbe2 Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Thu, 7 Jul 2016 13:24:17 -0700 Subject: [PATCH 0177/1238] Retry creation of IAM role depending on new IAM user (#7324) --- builtin/providers/aws/awserr.go | 14 +++++++++ .../providers/aws/resource_aws_iam_role.go | 13 +++++++- .../providers/aws/resource_aws_instance.go | 31 +++++++++---------- 3 files changed, 40 insertions(+), 18 deletions(-) create mode 100644 builtin/providers/aws/awserr.go diff --git a/builtin/providers/aws/awserr.go b/builtin/providers/aws/awserr.go new file mode 100644 index 000000000..8fc056801 --- /dev/null +++ b/builtin/providers/aws/awserr.go @@ -0,0 +1,14 @@ +package aws + +import ( + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +func isAWSErr(err error, code string, message string) bool { + if err, ok := err.(awserr.Error); ok { + return err.Code() == code && strings.Contains(err.Message(), message) + } + return false +} diff --git a/builtin/providers/aws/resource_aws_iam_role.go b/builtin/providers/aws/resource_aws_iam_role.go index effb95c36..43d292b0d 100644 --- a/builtin/providers/aws/resource_aws_iam_role.go +++ b/builtin/providers/aws/resource_aws_iam_role.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "regexp" + "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" @@ -103,7 +104,17 @@ func resourceAwsIamRoleCreate(d *schema.ResourceData, meta interface{}) error { AssumeRolePolicyDocument: aws.String(d.Get("assume_role_policy").(string)), } - createResp, err := iamconn.CreateRole(request) + var createResp *iam.CreateRoleOutput + err := resource.Retry(10*time.Second, func() *resource.RetryError { + var err error + createResp, err = iamconn.CreateRole(request) + // IAM roles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + if isAWSErr(err, "MalformedPolicyDocument", "Invalid principal in policy") { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + }) if err != nil { return fmt.Errorf("Error creating IAM Role %s: %s", name, err) } diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go index e43a20b10..7681d6ed2 100644 --- a/builtin/providers/aws/resource_aws_instance.go +++ b/builtin/providers/aws/resource_aws_instance.go @@ -361,25 +361,22 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Run configuration: %s", runOpts) var runResp *ec2.Reservation - for i := 0; i < 5; i++ { + err = resource.Retry(10*time.Second, func() *resource.RetryError { + var err error runResp, 
err = conn.RunInstances(runOpts) - if awsErr, ok := err.(awserr.Error); ok { - // IAM profiles can take ~10 seconds to propagate in AWS: - // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console - if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "Invalid IAM Instance Profile") { - log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") - time.Sleep(2 * time.Second) - continue - } - - // Warn if the AWS Error involves group ids, to help identify situation - // where a user uses group ids in security_groups for the Default VPC. - // See https://github.com/hashicorp/terraform/issues/3798 - if awsErr.Code() == "InvalidParameterValue" && strings.Contains(awsErr.Message(), "groupId is invalid") { - return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", awsErr.Message()) - } + // IAM profiles can take ~10 seconds to propagate in AWS: + // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console + if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { + log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") + return resource.RetryableError(err) } - break + return resource.NonRetryableError(err) + }) + // Warn if the AWS Error involves group ids, to help identify situation + // where a user uses group ids in security_groups for the Default VPC. + // See https://github.com/hashicorp/terraform/issues/3798 + if isAWSErr(err, "InvalidParameterValue", "groupId is invalid") { + return fmt.Errorf("Error launching instance, possible mismatch of Security Group IDs and Names. See AWS Instance docs here: %s.\n\n\tAWS Error: %s", "https://terraform.io/docs/providers/aws/r/instance.html", err.(awserr.Error).Message()) } if err != nil { return fmt.Errorf("Error launching source instance: %s", err) From 74813821ec38d7c18b0efed9aad37c1b7cf1aadd Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 7 Jul 2016 16:18:00 -0400 Subject: [PATCH 0178/1238] Add remote state init test Verify that a remote state file is correctly initialized in the same manner as used by the `terraform remote config` --- state/remote/remote_test.go | 76 +++++++++++++++++++------------------ 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/state/remote/remote_test.go b/state/remote/remote_test.go index 04f875747..db3c795c8 100644 --- a/state/remote/remote_test.go +++ b/state/remote/remote_test.go @@ -2,7 +2,6 @@ package remote import ( "bytes" - "fmt" "io/ioutil" "os" "testing" @@ -74,18 +73,7 @@ func TestRemoteClient_stateInit(t *testing.T) { // remote state. 
localStateFile.Close() os.Remove(localStateFile.Name()) - //defer os.Remove(localStateFile.Name()) - fmt.Println("LOCAL:", localStateFile.Name()) - - local := &state.LocalState{ - Path: localStateFile.Name(), - } - if err := local.RefreshState(); err != nil { - t.Fatal(err) - } - localState := local.State() - - fmt.Println("localState.Empty():", localState.Empty()) + defer os.Remove(localStateFile.Name()) remoteStateFile, err := ioutil.TempFile("", "tf") if err != nil { @@ -93,41 +81,55 @@ func TestRemoteClient_stateInit(t *testing.T) { } remoteStateFile.Close() os.Remove(remoteStateFile.Name()) - //defer os.Remove(remoteStateFile.Name() - fmt.Println("LOCAL:", localStateFile.Name()) - fmt.Println("REMOTE:", remoteStateFile.Name()) + defer os.Remove(remoteStateFile.Name()) + + // Now we need an empty state to initialize the state files. + newState := terraform.NewState() + newState.Remote = &terraform.RemoteState{ + Type: "_local", + Config: map[string]string{"path": remoteStateFile.Name()}, + } remoteClient := &FileClient{ Path: remoteStateFile.Name(), } - durable := &State{ - Client: remoteClient, - } - cache := &state.CacheState{ - Cache: local, - Durable: durable, + Cache: &state.LocalState{ + Path: localStateFile.Name(), + }, + Durable: &State{ + Client: remoteClient, + }, } - if err := cache.RefreshState(); err != nil { + // This will write the local state file, and set the state field in the CacheState + err = cache.WriteState(newState) + if err != nil { t.Fatal(err) } - switch cache.RefreshResult() { - - // we should be "refreshing" the remote state to initialize it - case state.CacheRefreshLocalNewer: - // Write our local state out to the durable storage to start. - if err := cache.WriteState(localState); err != nil { - t.Fatal("Error preparing remote state:", err) - } - if err := cache.PersistState(); err != nil { - t.Fatal("Error preparing remote state:", err) - } - default: - - t.Fatal("unexpected refresh result:", cache.RefreshResult()) + // This will persist the local state we just wrote to the remote state file + err = cache.PersistState() + if err != nil { + t.Fatal(err) } + // now compare the two state files just to be sure + localData, err := ioutil.ReadFile(localStateFile.Name()) + if err != nil { + t.Fatal(err) + } + + remoteData, err := ioutil.ReadFile(remoteStateFile.Name()) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(localData, remoteData) { + t.Log("state files don't match") + t.Log("Local:\n", string(localData)) + t.Log("Remote:\n", string(remoteData)) + t.Fatal("failed to initialize remote state") + } } From a86d766bd22571e9c90d0edcd303be62e16e03c3 Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Thu, 7 Jul 2016 13:45:28 -0700 Subject: [PATCH 0179/1238] provider/aws: internetgateway timing tweaks (again) (#7447) This workaround is originally from 71b30c633f4824bba9c9a7eada788cd169db7568. According to the commit message from Mitchell Hashimoto: So I think the AWS API is just broken here. In the case that the state doesn't update, just assume it did after 5 seconds. Based on my experience, this AWS API is still broken in the same way. The timeout was later increased from 5 seconds to 10 seconds in 265cc4fffa3667ecb0ca20604ce49e162fb4e5e9. The timeout (but not the timer) was removed inexplicably in GH-1325. 
The symptom is this error from `terraform apply`: aws_internet_gateway.test: Error waiting for internet gateway (igw-553b4731) to attach: timeout while waiting for state to become '[available]' followed by all subsequent `terraform apply` commands failing with this error: aws_internet_gateway.test: Resource.AlreadyAssociated: resource igw-553b4731 is already attached to network vpc-61bc7606 --- builtin/providers/aws/resource_aws_internet_gateway.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/builtin/providers/aws/resource_aws_internet_gateway.go b/builtin/providers/aws/resource_aws_internet_gateway.go index 338ab0833..fe9ed3ebd 100644 --- a/builtin/providers/aws/resource_aws_internet_gateway.go +++ b/builtin/providers/aws/resource_aws_internet_gateway.go @@ -319,6 +319,10 @@ func IGAttachStateRefreshFunc(conn *ec2.EC2, id string, expected string) resourc ig := resp.InternetGateways[0] + if time.Now().Sub(start) > 10*time.Second { + return ig, expected, nil + } + if len(ig.Attachments) == 0 { // No attachments, we're detached return ig, "detached", nil From db627798e613689e7fc1f4f356a362bfb696907b Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Thu, 7 Jul 2016 14:06:02 -0700 Subject: [PATCH 0180/1238] provider/aws: Handle spurious failures in resourceAwsSecurityGroupRuleRead (#7377) Previously, any old HTTP error would be treated as the security_group_rule being deleted. In reality there are only a few cases where this is the right assumption. --- .../aws/resource_aws_security_group_rule.go | 33 +++++++++++++++---- 1 file changed, 26 insertions(+), 7 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index e963ca679..d170b3d20 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -205,11 +205,14 @@ func resourceAwsSecurityGroupRuleRead(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).ec2conn sg_id := d.Get("security_group_id").(string) sg, err := findResourceSecurityGroup(conn, sg_id) - if err != nil { - log.Printf("[DEBUG] Error finding Secuirty Group (%s) for Rule (%s): %s", sg_id, d.Id(), err) + if _, notFound := err.(securityGroupNotFound); notFound { + // The security group containing this rule no longer exists. 
d.SetId("") return nil } + if err != nil { + return fmt.Errorf("Error finding security group (%s) for rule (%s): %s", sg_id, d.Id(), err) + } isVPC := sg.VpcId != nil && *sg.VpcId != "" @@ -312,19 +315,35 @@ func findResourceSecurityGroup(conn *ec2.EC2, id string) (*ec2.SecurityGroup, er GroupIds: []*string{aws.String(id)}, } resp, err := conn.DescribeSecurityGroups(req) + if err, ok := err.(awserr.Error); ok && err.Code() == "InvalidGroup.NotFound" { + return nil, securityGroupNotFound{id, nil} + } if err != nil { return nil, err } - - if resp == nil || len(resp.SecurityGroups) != 1 || resp.SecurityGroups[0] == nil { - return nil, fmt.Errorf( - "Expected to find one security group with ID %q, got: %#v", - id, resp.SecurityGroups) + if resp == nil { + return nil, securityGroupNotFound{id, nil} + } + if len(resp.SecurityGroups) != 1 || resp.SecurityGroups[0] == nil { + return nil, securityGroupNotFound{id, resp.SecurityGroups} } return resp.SecurityGroups[0], nil } +type securityGroupNotFound struct { + id string + securityGroups []*ec2.SecurityGroup +} + +func (err securityGroupNotFound) Error() string { + if err.securityGroups == nil { + return fmt.Sprintf("No security group with ID %q", err.id) + } + return fmt.Sprintf("Expected to find one security group with ID %q, got: %#v", + err.id, err.securityGroups) +} + // ByGroupPair implements sort.Interface for []*ec2.UserIDGroupPairs based on // GroupID or GroupName field (only one should be set). type ByGroupPair []*ec2.UserIdGroupPair From d21c6c798272a139893ef15a1abb45adbf48e841 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 7 Jul 2016 16:07:53 -0500 Subject: [PATCH 0181/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ddf9ae7f0..986873fc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,6 +101,7 @@ IMPROVEMENTS: * provider/aws: Allow `aws_redshift_security_group` ingress rules to change [GH-5939] * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` [GH-7181] * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint [GH-7511] + * provider/aws: Retry creation of IAM role depending on new IAM user [GH-7324] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] @@ -154,6 +155,7 @@ BUG FIXES: * provider/aws: Fix issue reattaching a VPN gateway to a VPC [GH-6987] * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations [GH-6512] * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) [GH-6761] + * provider/aws: Handle spurious failures in resourceAwsSecurityGroupRuleRead [GH-7377] * provider/aws: Make 'stage_name' required in api_gateway_deployment [GH-6797] * provider/aws: Mark Lambda function as gone when it's gone [GH-6924] * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs [GH-6592] From 041d3e0e61686b561f516298b85af3c4fadca812 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 8 Jul 2016 11:07:30 +0100 Subject: [PATCH 0182/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 986873fc4..6f98fe4c2 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -149,6 +149,7 @@ BUG FIXES: * core: Fix a crash during eval when we're upgrading an empty state [GH-7403] * core: Honor the `-state-out` flag when applying with a plan file [GH-7443] * core: Fix a panic when a `terraform_remote_state` data source doesn't exist [GH-7464] + * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected [GH-7530] * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources [GH-6829] * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation [GH-7227] * provider/aws: Fix crash in `aws_elasticache_parameter_group` occuring following edits in the console [GH-6687] From d2c9cc338eafd500416db50d8690dc7131ba1f61 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 6 Jul 2016 16:02:13 -0500 Subject: [PATCH 0183/1238] provider/aws: Beanstalk environments, bump the minimum timeout between API calls --- .../aws/resource_aws_elastic_beanstalk_environment.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 5e5cb0c06..674c80f90 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -249,7 +249,7 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + MinTimeout: 20 * time.Second, } _, err = stateConf.WaitForState() @@ -321,7 +321,7 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + MinTimeout: 20 * time.Second, } _, err = stateConf.WaitForState() @@ -567,7 +567,7 @@ func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta i Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + MinTimeout: 20 * time.Second, } _, err = stateConf.WaitForState() From 088feb933f02c73780f1a44df28a8dc4c6290caf Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 7 Jul 2016 19:14:33 +0100 Subject: [PATCH 0184/1238] terraform: Add test case reproducing #7241 The reproduction of issue #7421 involves a list of maps being passed to a module, where one or more of the maps has a value which is computed (for example, from another resource). There is a failure at the point of use (via lookup interpolation) of the computed value of the form: ``` lookup: lookup failed to find 'elb' in: ${lookup(var.services[count.index], "elb")} ``` Where 'elb' is the key of the map. 
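For reference, a minimal configuration sketch of the shape that reproduces the failure, mirroring the test fixture added in this patch (the `aws_computed_source` resource and module layout below come from that fixture; treat it as an illustration rather than a standalone example):

```
# A resource with an attribute that is only known after apply.
resource "aws_computed_source" "intermediates" {}

module "test_mod" {
  source = "./mod"

  # A list of maps in which one value is computed from another resource.
  services {
    "exists" = "true"
    "elb"    = "${aws_computed_source.intermediates.computed_read_only}"
  }
}

# Inside ./mod the value is read back with lookup(), which is where the
# "lookup failed to find 'elb'" error surfaces:
#   looked_up = "${lookup(var.services[0], "elb")}"
```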
--- terraform/context_plan_test.go | 40 +++++++++++++++++++ terraform/terraform_test.go | 16 ++++++++ .../plan-computed-value-in-map/main.tf | 15 +++++++ .../plan-computed-value-in-map/mod/main.tf | 8 ++++ 4 files changed, 79 insertions(+) create mode 100644 terraform/test-fixtures/plan-computed-value-in-map/main.tf create mode 100644 terraform/test-fixtures/plan-computed-value-in-map/mod/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 9ad2d1f19..09ed1bd82 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2325,3 +2325,43 @@ func TestContext2Plan_moduleMapLiteral(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestContext2Plan_computedValueInMap(t *testing.T) { + m := testModule(t, "plan-computed-value-in-map") + p := testProvider("aws") + p.DiffFn = func(info *InstanceInfo, state *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { + switch info.Type { + case "aws_computed_source": + return &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "computed_read_only": &ResourceAttrDiff{ + NewComputed: true, + }, + }, + }, nil + } + + return testDiffFn(info, state, c) + } + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(plan.String()) + expected := strings.TrimSpace(testTerraformPlanComputedValueInMap) + if actual != expected { + t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) + } +} diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index c608d3d73..333e72767 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -1355,3 +1355,19 @@ aws_instance.foo: ID = bar ami = ami-abcd1234 ` + +const testTerraformPlanComputedValueInMap = ` +DIFF: + +CREATE: aws_computed_source.intermediates + computed_read_only: "" => "" + +module.test_mod: + CREATE: aws_instance.inner2 + looked_up: "" => "" + type: "" => "aws_instance" + +STATE: + + +` diff --git a/terraform/test-fixtures/plan-computed-value-in-map/main.tf b/terraform/test-fixtures/plan-computed-value-in-map/main.tf new file mode 100644 index 000000000..b820b1705 --- /dev/null +++ b/terraform/test-fixtures/plan-computed-value-in-map/main.tf @@ -0,0 +1,15 @@ +resource "aws_computed_source" "intermediates" {} + +module "test_mod" { + source = "./mod" + + services { + "exists" = "true" + "elb" = "${aws_computed_source.intermediates.computed_read_only}" + } + + services { + "otherexists" = " true" + "elb" = "${aws_computed_source.intermediates.computed_read_only}" + } +} diff --git a/terraform/test-fixtures/plan-computed-value-in-map/mod/main.tf b/terraform/test-fixtures/plan-computed-value-in-map/mod/main.tf new file mode 100644 index 000000000..82ee1e494 --- /dev/null +++ b/terraform/test-fixtures/plan-computed-value-in-map/mod/main.tf @@ -0,0 +1,8 @@ +variable "services" { + type = "list" +} + +resource "aws_instance" "inner2" { + looked_up = "${lookup(var.services[0], "elb")}" +} + From c6e03cba96cea7f222a8c1cde74d7ed362d33415 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 7 Jul 2016 19:15:32 +0100 Subject: [PATCH 0185/1238] core: Fix slice element keys on interpolateWalk Part of the interpolation walk is to detect keys which involve computed values and therefore cannot be resolved at this time. 
The interplation walker keeps sufficient state to be able to populate the ResourceConfig with a slice of such keys. Previously they didn't take slice indexes into account, so in the following case: ``` "services": []interface{}{ map[string]interface{}{ "elb": "___something computed___", }, map[string]interface{}{ "elb": "___something else computed___", }, map[string]interface{}{ "elb": "not computed", }, } ``` Unknown keys would be populated as follows: ``` services.elb services.elb ``` This is not sufficient information to be useful, as it is impossible to distinguish which of the `services.elb`s are unknown vs not. This commit therefore retains the slice indexes as part of the key for unknown keys - producing for the example above: ``` services.0.elb services.1.elb ``` --- config/interpolate_walk.go | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/config/interpolate_walk.go b/config/interpolate_walk.go index 143b96131..720a8b285 100644 --- a/config/interpolate_walk.go +++ b/config/interpolate_walk.go @@ -54,6 +54,9 @@ type interpolationWalkerContextFunc func(reflectwalk.Location, ast.Node) func (w *interpolationWalker) Enter(loc reflectwalk.Location) error { w.loc = loc + if loc == reflectwalk.WalkLoc { + w.sliceIndex = -1 + } return nil } @@ -72,6 +75,7 @@ func (w *interpolationWalker) Exit(loc reflectwalk.Location) error { w.cs = w.cs[:len(w.cs)-1] case reflectwalk.SliceElem: w.csKey = w.csKey[:len(w.csKey)-1] + w.sliceIndex = -1 } return nil @@ -85,7 +89,13 @@ func (w *interpolationWalker) Map(m reflect.Value) error { func (w *interpolationWalker) MapElem(m, k, v reflect.Value) error { w.csData = k w.csKey = append(w.csKey, k) - w.key = append(w.key, k.String()) + + if w.sliceIndex != -1 { + w.key = append(w.key, fmt.Sprintf("%d.%s", w.sliceIndex, k.String())) + } else { + w.key = append(w.key, k.String()) + } + w.lastValue = v return nil } @@ -164,6 +174,7 @@ func (w *interpolationWalker) Primitive(v reflect.Value) error { } else if replaceVal == UnknownVariableValue { remove = true } + if remove { w.removeCurrent() return nil From b6fff854a6e6dac2ebc99438acc05f052d9743cc Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 8 Jul 2016 10:11:25 +0100 Subject: [PATCH 0186/1238] core: Set all unknown keys to UnknownVariableValue As part of evaluating a variable block, there is a pass made on unknown keys setting them to the config.DefaultVariableValue sentinal value. Previously this only took into account one level of nesting and assumed all values were strings. This commit now traverses the unknown keys via lists and maps and sets unknown map keys surgically. Fixes #7241. 
--- terraform/eval_variable.go | 52 +++++++++++++++++++++++++++++++++++--- 1 file changed, 49 insertions(+), 3 deletions(-) diff --git a/terraform/eval_variable.go b/terraform/eval_variable.go index ce6064706..47bd2ea2b 100644 --- a/terraform/eval_variable.go +++ b/terraform/eval_variable.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "reflect" + "strconv" "strings" "github.com/hashicorp/terraform/config" @@ -143,15 +144,60 @@ func (n *EvalVariableBlock) Eval(ctx EvalContext) (interface{}, error) { return nil, fmt.Errorf("Variable value for %s is not a string, list or map type", k) } - for k, _ := range rc.Raw { - if _, ok := n.VariableValues[k]; !ok { - n.VariableValues[k] = config.UnknownVariableValue + + for _, path := range rc.ComputedKeys { + log.Printf("[DEBUG] Setting Unknown Variable Value for computed key: %s", path) + err := n.setUnknownVariableValueForPath(path) + if err != nil { + return nil, err } } return nil, nil } +func (n *EvalVariableBlock) setUnknownVariableValueForPath(path string) error { + pathComponents := strings.Split(path, ".") + + if len(pathComponents) < 1 { + return fmt.Errorf("No path comoponents in %s", path) + } + + if len(pathComponents) == 1 { + // Special case the "top level" since we know the type + if _, ok := n.VariableValues[pathComponents[0]]; !ok { + n.VariableValues[pathComponents[0]] = config.UnknownVariableValue + } + return nil + } + + // Otherwise find the correct point in the tree and then set to unknown + var current interface{} = n.VariableValues[pathComponents[0]] + for i := 1; i < len(pathComponents); i++ { + switch current.(type) { + case []interface{}, []map[string]interface{}: + tCurrent := current.([]interface{}) + index, err := strconv.Atoi(pathComponents[i]) + if err != nil { + return fmt.Errorf("Cannot convert %s to slice index in path %s", + pathComponents[i], path) + } + current = tCurrent[index] + case map[string]interface{}: + tCurrent := current.(map[string]interface{}) + if val, hasVal := tCurrent[pathComponents[i]]; hasVal { + current = val + continue + } + + tCurrent[pathComponents[i]] = config.UnknownVariableValue + break + } + } + + return nil +} + // EvalCoerceMapVariable is an EvalNode implementation that recognizes a // specific ambiguous HCL parsing situation and resolves it. In HCL parsing, a // bare map literal is indistinguishable from a list of maps w/ one element. From 861ac536dd68881cf2d0b1c19a31d212bc36ddad Mon Sep 17 00:00:00 2001 From: Martin Atkins Date: Sat, 21 May 2016 15:09:55 -0700 Subject: [PATCH 0187/1238] provider/template: convert resources to data sources The template resources don't actually need to retain any state, so they are good candidates to be data sources. This includes a few tweaks to the acceptance tests -- now configured to run as unit tests -- since it seems that they have been slightly broken for a while now. In particular, the "update" cases are no longer tested because updating is not a meaningful operation for a data source. 
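To illustrate what this change means for configurations (the `vars` values below are placeholders, not from the patch; the documentation updates later in this diff show the same pattern), templates are now declared as data sources and referenced through the `data.` prefix:

```
# Before: resource "template_file" "init" { ... }
# After:
data "template_file" "init" {
  template = "${file("${path.module}/init.tpl")}"

  vars {
    greeting = "hello"
  }
}

resource "aws_instance" "web" {
  # ...
  user_data = "${data.template_file.init.rendered}"
}
```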
--- ...nfig.go => datasource_cloudinit_config.go} | 30 +--- .../datasource_cloudinit_config_test.go | 80 +++++++++++ ...te_file.go => datasource_template_file.go} | 39 +---- ...st.go => datasource_template_file_test.go} | 43 +----- builtin/providers/template/provider.go | 14 +- .../resource_cloudinit_config_test.go | 133 ------------------ .../terraform/data_source_state_test.go | 4 +- .../{r => d}/cloudinit_config.html.markdown | 10 +- .../providers/template/{r => d}/file.html.md | 4 +- .../providers/template/index.html.markdown | 15 +- website/source/layouts/template.erb | 12 +- 11 files changed, 122 insertions(+), 262 deletions(-) rename builtin/providers/template/{resource_cloudinit_config.go => datasource_cloudinit_config.go} (83%) create mode 100644 builtin/providers/template/datasource_cloudinit_config_test.go rename builtin/providers/template/{resource_template_file.go => datasource_template_file.go} (76%) rename builtin/providers/template/{resource_template_file_test.go => datasource_template_file_test.go} (69%) delete mode 100644 builtin/providers/template/resource_cloudinit_config_test.go rename website/source/docs/providers/template/{r => d}/cloudinit_config.html.markdown (87%) rename website/source/docs/providers/template/{r => d}/file.html.md (94%) diff --git a/builtin/providers/template/resource_cloudinit_config.go b/builtin/providers/template/datasource_cloudinit_config.go similarity index 83% rename from builtin/providers/template/resource_cloudinit_config.go rename to builtin/providers/template/datasource_cloudinit_config.go index c745b36fa..4bf8dfa34 100644 --- a/builtin/providers/template/resource_cloudinit_config.go +++ b/builtin/providers/template/datasource_cloudinit_config.go @@ -15,13 +15,9 @@ import ( "github.com/sthulb/mime/multipart" ) -func resourceCloudinitConfig() *schema.Resource { +func dataSourceCloudinitConfig() *schema.Resource { return &schema.Resource{ - Create: resourceCloudinitConfigCreate, - Delete: resourceCloudinitConfigDelete, - Update: resourceCloudinitConfigCreate, - Exists: resourceCloudinitConfigExists, - Read: resourceCloudinitConfigRead, + Read: dataSourceCloudinitConfigRead, Schema: map[string]*schema.Schema{ "part": &schema.Schema{ @@ -52,13 +48,11 @@ func resourceCloudinitConfig() *schema.Resource { Type: schema.TypeBool, Optional: true, Default: true, - ForceNew: true, }, "base64_encode": &schema.Schema{ Type: schema.TypeBool, Optional: true, Default: true, - ForceNew: true, }, "rendered": &schema.Schema{ Type: schema.TypeString, @@ -69,7 +63,7 @@ func resourceCloudinitConfig() *schema.Resource { } } -func resourceCloudinitConfigCreate(d *schema.ResourceData, meta interface{}) error { +func dataSourceCloudinitConfigRead(d *schema.ResourceData, meta interface{}) error { rendered, err := renderCloudinitConfig(d) if err != nil { return err @@ -80,24 +74,6 @@ func resourceCloudinitConfigCreate(d *schema.ResourceData, meta interface{}) err return nil } -func resourceCloudinitConfigDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func resourceCloudinitConfigExists(d *schema.ResourceData, meta interface{}) (bool, error) { - rendered, err := renderCloudinitConfig(d) - if err != nil { - return false, err - } - - return strconv.Itoa(hashcode.String(rendered)) == d.Id(), nil -} - -func resourceCloudinitConfigRead(d *schema.ResourceData, meta interface{}) error { - return nil -} - func renderCloudinitConfig(d *schema.ResourceData) (string, error) { gzipOutput := d.Get("gzip").(bool) base64Output := 
d.Get("base64_encode").(bool) diff --git a/builtin/providers/template/datasource_cloudinit_config_test.go b/builtin/providers/template/datasource_cloudinit_config_test.go new file mode 100644 index 000000000..e3e7225db --- /dev/null +++ b/builtin/providers/template/datasource_cloudinit_config_test.go @@ -0,0 +1,80 @@ +package template + +import ( + "testing" + + r "github.com/hashicorp/terraform/helper/resource" +) + +func TestRender(t *testing.T) { + testCases := []struct { + ResourceBlock string + Expected string + }{ + { + `data "template_cloudinit_config" "foo" { + gzip = false + base64_encode = false + + part { + content_type = "text/x-shellscript" + content = "baz" + } + }`, + "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n", + }, + { + `data "template_cloudinit_config" "foo" { + gzip = false + base64_encode = false + + part { + content_type = "text/x-shellscript" + content = "baz" + filename = "foobar.sh" + } + }`, + "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n", + }, + { + `data "template_cloudinit_config" "foo" { + gzip = false + base64_encode = false + + part { + content_type = "text/x-shellscript" + content = "baz" + } + part { + content_type = "text/x-shellscript" + content = "ffbaz" + } + }`, + "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nffbaz\r\n--MIMEBOUNDARY--\r\n", + }, + } + + for _, tt := range testCases { + r.UnitTest(t, r.TestCase{ + Providers: testProviders, + Steps: []r.TestStep{ + r.TestStep{ + Config: tt.ResourceBlock, + Check: r.ComposeTestCheckFunc( + r.TestCheckResourceAttr("data.template_cloudinit_config.foo", "rendered", tt.Expected), + ), + }, + }, + }) + } +} + +var testCloudInitConfig_basic = ` +data "template_cloudinit_config" "config" { + part { + content_type = "text/x-shellscript" + content = "baz" + } +}` + +var testCloudInitConfig_basic_expected = `Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY--\r\n` diff --git a/builtin/providers/template/resource_template_file.go b/builtin/providers/template/datasource_template_file.go similarity index 76% rename from builtin/providers/template/resource_template_file.go rename to builtin/providers/template/datasource_template_file.go index c5b3b3b09..865a24e06 100644 --- a/builtin/providers/template/resource_template_file.go +++ b/builtin/providers/template/datasource_template_file.go @@ -4,7 +4,6 @@ import ( "crypto/sha256" "encoding/hex" "fmt" - "log" "os" "path/filepath" @@ -15,19 +14,15 @@ import ( "github.com/hashicorp/terraform/helper/schema" ) -func resourceFile() *schema.Resource { +func dataSourceFile() *schema.Resource { return &schema.Resource{ - Create: resourceFileCreate, - Delete: resourceFileDelete, - Exists: resourceFileExists, - Read: resourceFileRead, + Read: 
dataSourceFileRead, Schema: map[string]*schema.Schema{ "template": &schema.Schema{ Type: schema.TypeString, Optional: true, Description: "Contents of the template", - ForceNew: true, ConflictsWith: []string{"filename"}, ValidateFunc: validateTemplateAttribute, }, @@ -35,7 +30,6 @@ func resourceFile() *schema.Resource { Type: schema.TypeString, Optional: true, Description: "file to read template from", - ForceNew: true, // Make a "best effort" attempt to relativize the file path. StateFunc: func(v interface{}) string { if v == nil || v.(string) == "" { @@ -59,7 +53,6 @@ func resourceFile() *schema.Resource { Optional: true, Default: make(map[string]interface{}), Description: "variables to substitute", - ForceNew: true, }, "rendered": &schema.Schema{ Type: schema.TypeString, @@ -70,7 +63,7 @@ func resourceFile() *schema.Resource { } } -func resourceFileCreate(d *schema.ResourceData, meta interface{}) error { +func dataSourceFileRead(d *schema.ResourceData, meta interface{}) error { rendered, err := renderFile(d) if err != nil { return err @@ -80,32 +73,6 @@ func resourceFileCreate(d *schema.ResourceData, meta interface{}) error { return nil } -func resourceFileDelete(d *schema.ResourceData, meta interface{}) error { - d.SetId("") - return nil -} - -func resourceFileExists(d *schema.ResourceData, meta interface{}) (bool, error) { - rendered, err := renderFile(d) - if err != nil { - if _, ok := err.(templateRenderError); ok { - log.Printf("[DEBUG] Got error while rendering in Exists: %s", err) - log.Printf("[DEBUG] Returning false so the template re-renders using latest variables from config.") - return false, nil - } else { - return false, err - } - } - return hash(rendered) == d.Id(), nil -} - -func resourceFileRead(d *schema.ResourceData, meta interface{}) error { - // Logic is handled in Exists, which only returns true if the rendered - // contents haven't changed. That means if we get here there's nothing to - // do. 
- return nil -} - type templateRenderError error func renderFile(d *schema.ResourceData) (string, error) { diff --git a/builtin/providers/template/resource_template_file_test.go b/builtin/providers/template/datasource_template_file_test.go similarity index 69% rename from builtin/providers/template/resource_template_file_test.go rename to builtin/providers/template/datasource_template_file_test.go index 05e88c4db..64a64102a 100644 --- a/builtin/providers/template/resource_template_file_test.go +++ b/builtin/providers/template/datasource_template_file_test.go @@ -23,13 +23,13 @@ func TestTemplateRendering(t *testing.T) { want string }{ {`{}`, `ABC`, `ABC`}, - {`{a="foo"}`, `${a}`, `foo`}, - {`{a="hello"}`, `${replace(a, "ello", "i")}`, `hi`}, + {`{a="foo"}`, `$${a}`, `foo`}, + {`{a="hello"}`, `$${replace(a, "ello", "i")}`, `hi`}, {`{}`, `${1+2+3}`, `6`}, } for _, tt := range cases { - r.Test(t, r.TestCase{ + r.UnitTest(t, r.TestCase{ Providers: testProviders, Steps: []r.TestStep{ r.TestStep{ @@ -47,39 +47,6 @@ func TestTemplateRendering(t *testing.T) { } } -// https://github.com/hashicorp/terraform/issues/2344 -func TestTemplateVariableChange(t *testing.T) { - steps := []struct { - vars string - template string - want string - }{ - {`{a="foo"}`, `${a}`, `foo`}, - {`{b="bar"}`, `${b}`, `bar`}, - } - - var testSteps []r.TestStep - for i, step := range steps { - testSteps = append(testSteps, r.TestStep{ - Config: testTemplateConfig(step.template, step.vars), - Check: func(i int, want string) r.TestCheckFunc { - return func(s *terraform.State) error { - got := s.RootModule().Outputs["rendered"] - if want != got.Value { - return fmt.Errorf("[%d] got:\n%q\nwant:\n%q\n", i, got, want) - } - return nil - } - }(i, step.want), - }) - } - - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: testSteps, - }) -} - func TestValidateTemplateAttribute(t *testing.T) { file, err := ioutil.TempFile("", "testtemplate") if err != nil { @@ -129,11 +96,11 @@ func TestTemplateSharedMemoryRace(t *testing.T) { func testTemplateConfig(template, vars string) string { return fmt.Sprintf(` - resource "template_file" "t0" { + data "template_file" "t0" { template = "%s" vars = %s } output "rendered" { - value = "${template_file.t0.rendered}" + value = "${data.template_file.t0.rendered}" }`, template, vars) } diff --git a/builtin/providers/template/provider.go b/builtin/providers/template/provider.go index 1ebf3ae22..ece6c9f34 100644 --- a/builtin/providers/template/provider.go +++ b/builtin/providers/template/provider.go @@ -7,9 +7,19 @@ import ( func Provider() terraform.ResourceProvider { return &schema.Provider{ + DataSourcesMap: map[string]*schema.Resource{ + "template_file": dataSourceFile(), + "template_cloudinit_config": dataSourceCloudinitConfig(), + }, ResourcesMap: map[string]*schema.Resource{ - "template_file": resourceFile(), - "template_cloudinit_config": resourceCloudinitConfig(), + "template_file": schema.DataSourceResourceShim( + "template_file", + dataSourceFile(), + ), + "template_cloudinit_config": schema.DataSourceResourceShim( + "template_cloudinit_config", + dataSourceCloudinitConfig(), + ), }, } } diff --git a/builtin/providers/template/resource_cloudinit_config_test.go b/builtin/providers/template/resource_cloudinit_config_test.go deleted file mode 100644 index 9667d74fc..000000000 --- a/builtin/providers/template/resource_cloudinit_config_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package template - -import ( - "testing" - - r "github.com/hashicorp/terraform/helper/resource" -) - -func 
TestRender(t *testing.T) { - testCases := []struct { - ResourceBlock string - Expected string - }{ - { - `resource "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n", - }, - { - `resource "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - filename = "foobar.sh" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Disposition: attachment; filename=\"foobar.sh\"\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n", - }, - { - `resource "template_cloudinit_config" "foo" { - gzip = false - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - } - part { - content_type = "text/x-shellscript" - content = "ffbaz" - } - }`, - "Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nffbaz\r\n--MIMEBOUNDRY--\r\n", - }, - { - `resource "template_cloudinit_config" "foo" { - gzip = true - base64_encode = false - - part { - content_type = "text/x-shellscript" - content = "baz" - filename = "ah" - } - part { - content_type = "text/x-shellscript" - content = "ffbaz" - } - }`, - "\x1f\x8b\b\x00\x00\tn\x88\x00\xff\xac\xce\xc1J\x031\x10\xc6\xf1{`\xdf!\xe4>VO\u0096^\xb4=xX\x05\xa9\x82\xc7\xd9݉;\x90LB2\x85\xadOo-\x88\x8b\xe2\xadÇ„\x1f\xf3\xfd\xef\x93(\x89\xc2\xfe\x98\xa9\xb5\xf1\x10\x943\x16]E\x9ei\\\xdb>\x1dd\xc4rܸ\xee\xa1\xdb\xdd=\xbd\x03\x00\x00\xff\xffmB\x8c\xeed\x01\x00\x00", - }, - } - - for _, tt := range testCases { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: tt.ResourceBlock, - Check: r.ComposeTestCheckFunc( - r.TestCheckResourceAttr("template_cloudinit_config.foo", "rendered", tt.Expected), - ), - }, - }, - }) - } -} - -func TestCloudConfig_update(t *testing.T) { - r.Test(t, r.TestCase{ - Providers: testProviders, - Steps: []r.TestStep{ - r.TestStep{ - Config: testCloudInitConfig_basic, - Check: r.ComposeTestCheckFunc( - r.TestCheckResourceAttr("template_cloudinit_config.config", "rendered", testCloudInitConfig_basic_expected), - ), - }, - - r.TestStep{ - Config: testCloudInitConfig_update, - Check: r.ComposeTestCheckFunc( - r.TestCheckResourceAttr("template_cloudinit_config.config", "rendered", testCloudInitConfig_update_expected), - ), - }, - }, - }) -} - -var testCloudInitConfig_basic = ` -resource "template_cloudinit_config" "config" { - part { - content_type = "text/x-shellscript" - content = "baz" - } -}` - -var testCloudInitConfig_basic_expected = `Content-Type: multipart/mixed; boundary=\"MIMEBOUNDRY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDRY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDRY--\r\n` - -var testCloudInitConfig_update = ` -resource "template_cloudinit_config" "config" { - part { - content_type = "text/x-shellscript" - content = 
"baz" - } - - part { - content_type = "text/x-shellscript" - content = "ffbaz" - } -}` - -var testCloudInitConfig_update_expected = `Content-Type: multipart/mixed; boundary=\"MIMEBOUNDARY\"\nMIME-Version: 1.0\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nbaz\r\n--MIMEBOUNDARY\r\nContent-Transfer-Encoding: 7bit\r\nContent-Type: text/x-shellscript\r\nMime-Version: 1.0\r\n\r\nffbaz\r\n--MIMEBOUNDARY--\r\n` diff --git a/builtin/providers/terraform/data_source_state_test.go b/builtin/providers/terraform/data_source_state_test.go index 26db76db6..0bab4b261 100644 --- a/builtin/providers/terraform/data_source_state_test.go +++ b/builtin/providers/terraform/data_source_state_test.go @@ -17,7 +17,7 @@ func TestState_basic(t *testing.T) { Config: testAccState_basic, Check: resource.ComposeTestCheckFunc( testAccCheckStateValue( - "terraform_remote_state.foo", "foo", "bar"), + "data.terraform_remote_state.foo", "foo", "bar"), ), }, }, @@ -64,7 +64,7 @@ func testAccCheckStateValue(id, name, value string) resource.TestCheckFunc { } const testAccState_basic = ` -resource "terraform_remote_state" "foo" { +data "terraform_remote_state" "foo" { backend = "_local" config { diff --git a/website/source/docs/providers/template/r/cloudinit_config.html.markdown b/website/source/docs/providers/template/d/cloudinit_config.html.markdown similarity index 87% rename from website/source/docs/providers/template/r/cloudinit_config.html.markdown rename to website/source/docs/providers/template/d/cloudinit_config.html.markdown index 69dc722b3..030092ea5 100644 --- a/website/source/docs/providers/template/r/cloudinit_config.html.markdown +++ b/website/source/docs/providers/template/d/cloudinit_config.html.markdown @@ -1,7 +1,7 @@ --- layout: "template" page_title: "Template: cloudinit_multipart" -sidebar_current: "docs-template-resource-cloudinit-config" +sidebar_current: "docs-template-datasource-cloudinit-config" description: |- Renders a multi-part cloud-init config from source files. --- @@ -14,7 +14,7 @@ Renders a multi-part cloud-init config from source files. 
``` # Render a part using a `template_file` -resource "template_file" "script" { +data "template_file" "script" { template = "${file("${path.module}/init.tpl")}" vars { @@ -24,7 +24,7 @@ resource "template_file" "script" { # Render a multi-part cloudinit config making use of the part # above, and other source files -resource "template_cloudinit_config" "config" { +data "template_cloudinit_config" "config" { gzip = true base64_encode = true @@ -32,7 +32,7 @@ resource "template_cloudinit_config" "config" { part { filename = "init.cfg" content_type = "text/part-handler" - content = "${template_file.script.rendered}" + content = "${data.template_file.script.rendered}" } part { @@ -50,7 +50,7 @@ resource "template_cloudinit_config" "config" { resource "aws_instance" "web" { ami = "ami-d05e75b8" instance_type = "t2.micro" - user_data = "${template_cloudinit_config.config.rendered}" + user_data = "${data.template_cloudinit_config.config.rendered}" } ``` diff --git a/website/source/docs/providers/template/r/file.html.md b/website/source/docs/providers/template/d/file.html.md similarity index 94% rename from website/source/docs/providers/template/r/file.html.md rename to website/source/docs/providers/template/d/file.html.md index b0e8af469..6b8381d61 100644 --- a/website/source/docs/providers/template/r/file.html.md +++ b/website/source/docs/providers/template/d/file.html.md @@ -1,7 +1,7 @@ --- layout: "template" page_title: "Template: template_file" -sidebar_current: "docs-template-resource-file" +sidebar_current: "docs-template-datasource-file" description: |- Renders a template from a file. --- @@ -13,7 +13,7 @@ Renders a template from a file. ## Example Usage ``` -resource "template_file" "init" { +data "template_file" "init" { template = "${file("${path.module}/init.tpl")}" vars { diff --git a/website/source/docs/providers/template/index.html.markdown b/website/source/docs/providers/template/index.html.markdown index 3d180e42f..9fce6da1c 100644 --- a/website/source/docs/providers/template/index.html.markdown +++ b/website/source/docs/providers/template/index.html.markdown @@ -8,23 +8,16 @@ description: |- # Template Provider -The template provider exposes resources to use templates to generate +The template provider exposes data sources to use templates to generate strings for other Terraform resources or outputs. -The template provider is what we call a _logical provider_. This has no -impact on how it behaves, but conceptually it is important to understand. -The template provider doesn't manage any _physical_ resources; it isn't -creating servers, writing files, etc. It is used to generate attributes that -can be used for interpolation for other resources. Examples will explain -this best. - -Use the navigation to the left to read about the available resources. +Use the navigation to the left to read about the available data sources. ## Example Usage ``` # Template for initial configuration bash script -resource "template_file" "init" { +data "template_file" "init" { template = "${file("init.tpl")}" vars { @@ -36,6 +29,6 @@ resource "template_file" "init" { resource "aws_instance" "web" { # ... 
- user_data = "${template_file.init.rendered}" + user_data = "${data.template_file.init.rendered}" } ``` diff --git a/website/source/layouts/template.erb b/website/source/layouts/template.erb index 1d666fce2..c710a5de0 100644 --- a/website/source/layouts/template.erb +++ b/website/source/layouts/template.erb @@ -10,14 +10,14 @@ Template Provider - > - Resources + > + Data Sources From 77d197bd062d48fac1e695786b09f2e999917f2f Mon Sep 17 00:00:00 2001 From: "Christian G. Warden" Date: Fri, 8 Jul 2016 09:16:27 -0700 Subject: [PATCH 0188/1238] Grammar/Spelling Fixes for ecs_service Docs --- .../source/docs/providers/aws/r/ecs_service.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/ecs_service.html.markdown b/website/source/docs/providers/aws/r/ecs_service.html.markdown index ac58f6891..33c329046 100644 --- a/website/source/docs/providers/aws/r/ecs_service.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_service.html.markdown @@ -8,9 +8,9 @@ description: |- # aws\_ecs\_service --> **Note:** To prevent race condition during service deletion, make sure to set `depends_on` to related `aws_iam_role_policy`, otherwise policy may be destroyed too soon and ECS service will then stuck in `DRAINING` state. +-> **Note:** To prevent a race condition during service deletion, make sure to set `depends_on` to the related `aws_iam_role_policy`; otherwise, the policy may be destroyed too soon and the ECS service will then get stuck in the `DRAINING` state. -Provides an ECS service - effectively a task that is expected to run until an error occures or user terminates it (typically a webserver or a database). +Provides an ECS service - effectively a task that is expected to run until an error occurs or a user terminates it (typically a webserver or a database). See [ECS Services section in AWS developer guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). @@ -46,7 +46,7 @@ The following arguments are supported: * `deployment_minimum_healthy_percent` - (Optional) The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. * `load_balancer` - (Optional) A load balancer block. Load balancers documented below. --> **Note:** As a result of AWS limitation a single `load_balancer` can be attached to the ECS service at most. See [related docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html#load-balancing-concepts). +-> **Note:** As a result of an AWS limitation, a single `load_balancer` can be attached to the ECS service at most. See [related docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html#load-balancing-concepts). 
Load balancers support the following: From 533e7aca3453bf3e7fd9487d716a57026b31b32c Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 8 Jul 2016 12:22:38 -0500 Subject: [PATCH 0189/1238] provider/aws: Add poll_interval to configure polling for ElasticBeanstalk --- ...ource_aws_elastic_beanstalk_environment.go | 68 ++++++++++++++----- ...lastic_beanstalk_environment.html.markdown | 5 +- 2 files changed, 54 insertions(+), 19 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 674c80f90..a1a3166c2 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -134,6 +134,24 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { return }, }, + "poll_interval": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "10s", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + duration, err := time.ParseDuration(value) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as a duration: %s", k, err)) + } + if duration < 10*time.Second || duration > 60*time.Second { + errors = append(errors, fmt.Errorf( + "%q must be between 10s and 60s", k)) + } + return + }, + }, "autoscaling_groups": &schema.Schema{ Type: schema.TypeList, Computed: true, @@ -182,10 +200,6 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i settings := d.Get("setting").(*schema.Set) solutionStack := d.Get("solution_stack_name").(string) templateName := d.Get("template_name").(string) - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } // TODO set tags // Note: at time of writing, you cannot view or edit Tags after creation @@ -243,13 +257,22 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i // Assign the application name as the resource ID d.SetId(*resp.EnvironmentId) + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + return err + } + stateConf := &resource.StateChangeConf{ Pending: []string{"Launching", "Updating"}, Target: []string{"Ready"}, Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 20 * time.Second, + MinTimeout: pollInterval, } _, err = stateConf.WaitForState() @@ -271,10 +294,6 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i conn := meta.(*AWSClient).elasticbeanstalkconn envId := d.Id() - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } updateOpts := elasticbeanstalk.UpdateEnvironmentInput{ EnvironmentId: aws.String(envId), @@ -310,7 +329,16 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i // Get the current time to filter describeBeanstalkEvents messages t := time.Now() log.Printf("[DEBUG] Elastic Beanstalk Environment update opts: %s", updateOpts) - _, err = conn.UpdateEnvironment(&updateOpts) + _, err := conn.UpdateEnvironment(&updateOpts) + if err != nil { + return err + } + + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil 
{ + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { return err } @@ -321,7 +349,7 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 20 * time.Second, + MinTimeout: pollInterval, } _, err = stateConf.WaitForState() @@ -542,11 +570,6 @@ func resourceAwsElasticBeanstalkEnvironmentSettingsRead(d *schema.ResourceData, func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticbeanstalkconn - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } - opts := elasticbeanstalk.TerminateEnvironmentInput{ EnvironmentId: aws.String(d.Id()), TerminateResources: aws.Bool(true), @@ -555,19 +578,28 @@ func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta i // Get the current time to filter describeBeanstalkEvents messages t := time.Now() log.Printf("[DEBUG] Elastic Beanstalk Environment terminate opts: %s", opts) - _, err = conn.TerminateEnvironment(&opts) + _, err := conn.TerminateEnvironment(&opts) if err != nil { return err } + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + return err + } + stateConf := &resource.StateChangeConf{ Pending: []string{"Terminating"}, Target: []string{"Terminated"}, Refresh: environmentStateRefreshFunc(conn, d.Id()), Timeout: waitForReadyTimeOut, Delay: 10 * time.Second, - MinTimeout: 20 * time.Second, + MinTimeout: pollInterval, } _, err = stateConf.WaitForState() diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown index bdd008477..6f6674ddd 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown @@ -51,10 +51,13 @@ The following arguments are supported: off of. Example stacks can be found in the [Amazon API documentation][1] * `template_name` – (Optional) The name of the Elastic Beanstalk Configuration template to use in deployment -* `wait_for_ready_timeout` - (Default: "10m") The maximum +* `wait_for_ready_timeout` - (Default: `10m`) The maximum [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should wait for an Elastic Beanstalk Environment to be in a ready state before timing out. +* `poll_interval` – (Default: `10s`) The time between polling the AWS API to +check if changes have been applied. Use this to adjust the rate of API calls +for any `create` or `update` action. Minimum `10s`, maximum `60s` * `tags` – (Optional) A set of tags to apply to the Environment. **Note:** at this time the Elastic Beanstalk API does not provide a programatic way of changing these tags after initial application From 9b065f32e23f22a96c2ba9cc930475d8ea75d18f Mon Sep 17 00:00:00 2001 From: "jorge.marey" Date: Fri, 13 Nov 2015 12:50:31 +0100 Subject: [PATCH 0190/1238] Include content option for file provisioner - Include new option in file provisioner. Now content or source can be provided. Content will create a temp file and copy there the contents. 
- Later that file will be used as source. - Include test to check that changes are working correctly. --- .../provisioners/file/resource_provisioner.go | 64 ++++++++++++++----- .../file/resource_provisioner_test.go | 35 +++++++++- 2 files changed, 82 insertions(+), 17 deletions(-) diff --git a/builtin/provisioners/file/resource_provisioner.go b/builtin/provisioners/file/resource_provisioner.go index 9484d3f18..ac87d7583 100644 --- a/builtin/provisioners/file/resource_provisioner.go +++ b/builtin/provisioners/file/resource_provisioner.go @@ -2,12 +2,12 @@ package file import ( "fmt" + "io/ioutil" "log" "os" "time" "github.com/hashicorp/terraform/communicator" - "github.com/hashicorp/terraform/helper/config" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/go-homedir" ) @@ -26,18 +26,13 @@ func (p *ResourceProvisioner) Apply( return err } - // Get the source and destination - sRaw := c.Config["source"] - src, ok := sRaw.(string) - if !ok { - return fmt.Errorf("Unsupported 'source' type! Must be string.") - } - - src, err = homedir.Expand(src) + // Get the source + src, err := p.getSrc(c) if err != nil { return err } + // Get destination dRaw := c.Config["destination"] dst, ok := dRaw.(string) if !ok { @@ -48,13 +43,52 @@ func (p *ResourceProvisioner) Apply( // Validate checks if the required arguments are configured func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string, es []error) { - v := &config.Validator{ - Required: []string{ - "source", - "destination", - }, + numDst := 0 + numSrc := 0 + for name := range c.Raw { + switch name { + case "destination": + numDst++ + case "source", "content": + numSrc++ + default: + es = append(es, fmt.Errorf("Unknown configuration '%s'", name)) + } } - return v.Validate(c) + if numSrc != 1 || numDst != 1 { + es = append(es, fmt.Errorf("Must provide one of 'content' or 'source' and 'destination' to file")) + } + return +} + +// getSrc returns the file to use as source +func (p *ResourceProvisioner) getSrc(c *terraform.ResourceConfig) (string, error) { + var src string + + sRaw, ok := c.Config["source"] + if ok { + if src, ok = sRaw.(string); !ok { + return "", fmt.Errorf("Unsupported 'source' type! Must be string.") + } + } + + content, ok := c.Config["content"] + if ok { + file, err := ioutil.TempFile("", "tf-file-content") + if err != nil { + return "", err + } + contentStr, ok := content.(string) + if !ok { + return "", fmt.Errorf("Unsupported 'content' type! 
Must be string.") + } + if _, err = file.WriteString(contentStr); err != nil { + return "", err + } + src = file.Name() + } + + return homedir.Expand(src) } // copyFiles is used to copy the files from a source to a destination diff --git a/builtin/provisioners/file/resource_provisioner_test.go b/builtin/provisioners/file/resource_provisioner_test.go index 0fc990cb4..713c8aa0d 100644 --- a/builtin/provisioners/file/resource_provisioner_test.go +++ b/builtin/provisioners/file/resource_provisioner_test.go @@ -11,7 +11,7 @@ func TestResourceProvisioner_impl(t *testing.T) { var _ terraform.ResourceProvisioner = new(ResourceProvisioner) } -func TestResourceProvider_Validate_good(t *testing.T) { +func TestResourceProvider_Validate_good_source(t *testing.T) { c := testConfig(t, map[string]interface{}{ "source": "/tmp/foo", "destination": "/tmp/bar", @@ -26,7 +26,22 @@ func TestResourceProvider_Validate_good(t *testing.T) { } } -func TestResourceProvider_Validate_bad(t *testing.T) { +func TestResourceProvider_Validate_good_content(t *testing.T) { + c := testConfig(t, map[string]interface{}{ + "content": "value to copy", + "destination": "/tmp/bar", + }) + p := new(ResourceProvisioner) + warn, errs := p.Validate(c) + if len(warn) > 0 { + t.Fatalf("Warnings: %v", warn) + } + if len(errs) > 0 { + t.Fatalf("Errors: %v", errs) + } +} + +func TestResourceProvider_Validate_bad_not_destination(t *testing.T) { c := testConfig(t, map[string]interface{}{ "source": "nope", }) @@ -40,6 +55,22 @@ func TestResourceProvider_Validate_bad(t *testing.T) { } } +func TestResourceProvider_Validate_bad_to_many_src(t *testing.T) { + c := testConfig(t, map[string]interface{}{ + "source": "nope", + "content": "value to copy", + "destination": "/tmp/bar", + }) + p := new(ResourceProvisioner) + warn, errs := p.Validate(c) + if len(warn) > 0 { + t.Fatalf("Warnings: %v", warn) + } + if len(errs) == 0 { + t.Fatalf("Should have errors") + } +} + func testConfig( t *testing.T, c map[string]interface{}) *terraform.ResourceConfig { From 800f7d2e06ec3ecff20e774dd2234fb24e2ad1ea Mon Sep 17 00:00:00 2001 From: "jorge.marey" Date: Mon, 22 Feb 2016 17:45:15 +0100 Subject: [PATCH 0191/1238] Update documentation --- .../source/docs/provisioners/file.html.markdown | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/website/source/docs/provisioners/file.html.markdown b/website/source/docs/provisioners/file.html.markdown index be50ed374..368219baa 100644 --- a/website/source/docs/provisioners/file.html.markdown +++ b/website/source/docs/provisioners/file.html.markdown @@ -24,6 +24,12 @@ resource "aws_instance" "web" { destination = "/etc/myapp.conf" } + # Copies the string in content into /tmp/file.log + provisioner "file" { + content = "ami used: ${self.ami}" + destination = "/tmp/file.log" + } + # Copies the configs.d folder to /etc/configs.d provisioner "file" { source = "conf/configs.d" @@ -42,8 +48,14 @@ resource "aws_instance" "web" { The following arguments are supported: -* `source` - (Required) This is the source file or folder. It can be specified as relative - to the current working directory or as an absolute path. +* `source` - This is the source file or folder. It can be specified as relative + to the current working directory or as an absolute path. This cannot be provided with `content`. + +* `content` - This is the content to copy on the destination. If destination is a file, + the content will be written on that file, in case of a directory a file named + *tf-file-content* is created. 
It's recommended to use a file as destination. A + [`template_file`](/docs/providers/template/r/file.html) might be referenced in here, or + any interpolation syntax for that matter. This cannot be provided with `source`. * `destination` - (Required) This is the destination path. It must be specified as an absolute path. From 8beafe25ae5d126cc355b9d17e030d1e38db7744 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 8 Jul 2016 19:34:37 +0100 Subject: [PATCH 0192/1238] provisioner/file: Clean up temporary files --- .../provisioners/file/resource_provisioner.go | 22 ++++++++++++------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/builtin/provisioners/file/resource_provisioner.go b/builtin/provisioners/file/resource_provisioner.go index ac87d7583..2cd060b63 100644 --- a/builtin/provisioners/file/resource_provisioner.go +++ b/builtin/provisioners/file/resource_provisioner.go @@ -27,10 +27,13 @@ func (p *ResourceProvisioner) Apply( } // Get the source - src, err := p.getSrc(c) + src, deleteSource, err := p.getSrc(c) if err != nil { return err } + if deleteSource { + defer os.Remove(src) + } // Get destination dRaw := c.Config["destination"] @@ -62,13 +65,13 @@ func (p *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string } // getSrc returns the file to use as source -func (p *ResourceProvisioner) getSrc(c *terraform.ResourceConfig) (string, error) { +func (p *ResourceProvisioner) getSrc(c *terraform.ResourceConfig) (string, bool, error) { var src string sRaw, ok := c.Config["source"] if ok { if src, ok = sRaw.(string); !ok { - return "", fmt.Errorf("Unsupported 'source' type! Must be string.") + return "", false, fmt.Errorf("Unsupported 'source' type! Must be string.") } } @@ -76,19 +79,22 @@ func (p *ResourceProvisioner) getSrc(c *terraform.ResourceConfig) (string, error if ok { file, err := ioutil.TempFile("", "tf-file-content") if err != nil { - return "", err + return "", true, err } + contentStr, ok := content.(string) if !ok { - return "", fmt.Errorf("Unsupported 'content' type! Must be string.") + return "", true, fmt.Errorf("Unsupported 'content' type! 
Must be string.") } if _, err = file.WriteString(contentStr); err != nil { - return "", err + return "", true, err } - src = file.Name() + + return file.Name(), true, nil } - return homedir.Expand(src) + expansion, err := homedir.Expand(src) + return expansion, false, err } // copyFiles is used to copy the files from a source to a destination From 201f8bdea2c9845468213ed613f64fe6cd28c0e0 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 8 Jul 2016 19:52:59 +0100 Subject: [PATCH 0193/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6f98fe4c2..beaab6ddf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -136,6 +136,7 @@ IMPROVEMENTS: * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` [GH-7088] * provider/vsphere: Virtual Machine and File resources handle Read errors properley [GH-7220] * provider/vsphere: set uuid as `vsphere_virtual_machine` output [GH-4382] + * provisioner/file: File provisioners may now have file content set as an attribute [GH-7561] BUG FIXES: From 14cea95e86a9ddb6870644082362a1b6d1a1cb3c Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 8 Jul 2016 09:14:06 -0500 Subject: [PATCH 0194/1238] terraform: another set of ignore_changes fixes This set of changes addresses two bug scenarios: (1) When an ignored change canceled a resource replacement, any downstream resources referencing computer attributes on that resource would get "diffs didn't match" errors. This happened because the `EvalDiff` implementation was calling `state.MergeDiff(diff)` on the unfiltered diff. Generally this is what you want, so that downstream references catch the "incoming" values. When there's a potential for the diff to change, thought, this results in problems w/ references. Here we solve this by doing away with the separate `EvalNode` for `ignore_changes` processing and integrating it into `EvalDiff`. This allows us to only call `MergeDiff` with the final, filtered diff. (2) When a resource had an ignored change but was still being replaced anyways, the diff was being improperly filtered. This would cause problems during apply when not all attributes were available to perform the replacement. We solve that by deferring actual attribute removal until after we've decided that we do not have to replace the resource. 
--- builtin/providers/test/resource.go | 6 ++ builtin/providers/test/resource_test.go | 100 ++++++++++++++++++ terraform/context_apply_test.go | 95 +++++++++++++++++ terraform/eval_diff.go | 91 ++++++++++++++++ terraform/eval_ignore_changes.go | 87 --------------- .../apply-ignore-changes-dep/main.tf | 12 +++ terraform/transform_resource.go | 13 +-- 7 files changed, 306 insertions(+), 98 deletions(-) delete mode 100644 terraform/eval_ignore_changes.go create mode 100644 terraform/test-fixtures/apply-ignore-changes-dep/main.tf diff --git a/builtin/providers/test/resource.go b/builtin/providers/test/resource.go index b2d554bd4..2017d7639 100644 --- a/builtin/providers/test/resource.go +++ b/builtin/providers/test/resource.go @@ -35,6 +35,12 @@ func testResource() *schema.Resource { Optional: true, Computed: true, }, + "optional_computed_force_new": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, "computed_read_only": { Type: schema.TypeString, Computed: true, diff --git a/builtin/providers/test/resource_test.go b/builtin/providers/test/resource_test.go index 35878e0b1..198101363 100644 --- a/builtin/providers/test/resource_test.go +++ b/builtin/providers/test/resource_test.go @@ -293,6 +293,106 @@ resource "test_resource" "foo" { }) } +func TestResource_ignoreChangesDependent(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + count = 2 + required = "yep" + required_map { key = "value" } + + optional_force_new = "one" + lifecycle { + ignore_changes = ["optional_force_new"] + } +} +resource "test_resource" "bar" { + count = 2 + required = "yep" + required_map { key = "value" } + optional = "${element(test_resource.foo.*.id, count.index)}" +} + `), + Check: func(s *terraform.State) error { + return nil + }, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + count = 2 + required = "yep" + required_map { key = "value" } + + optional_force_new = "two" + lifecycle { + ignore_changes = ["optional_force_new"] + } +} +resource "test_resource" "bar" { + count = 2 + required = "yep" + required_map { key = "value" } + optional = "${element(test_resource.foo.*.id, count.index)}" +} + `), + Check: func(s *terraform.State) error { + return nil + }, + }, + }, + }) +} + +func TestResource_ignoreChangesStillReplaced(t *testing.T) { + resource.UnitTest(t, resource.TestCase{ + Providers: testAccProviders, + CheckDestroy: testAccCheckResourceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + required = "yep" + required_map = { + key = "value" + } + optional_force_new = "one" + optional_bool = true + lifecycle { + ignore_changes = ["optional_bool"] + } +} + `), + Check: func(s *terraform.State) error { + return nil + }, + }, + resource.TestStep{ + Config: strings.TrimSpace(` +resource "test_resource" "foo" { + required = "yep" + required_map = { + key = "value" + } + optional_force_new = "two" + optional_bool = false + lifecycle { + ignore_changes = ["optional_bool"] + } +} + `), + Check: func(s *terraform.State) error { + return nil + }, + }, + }, + }) +} + func testAccCheckResourceDestroy(s *terraform.State) error { return nil } diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index a5f425a40..142f735c8 100644 --- 
a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -4700,6 +4700,101 @@ aws_instance.foo: required_field = set type = aws_instance `) + if actual != expected { + t.Fatalf("expected:\n%s\ngot:\n%s", expected, actual) + } +} + +func TestContext2Apply_ignoreChangesWithDep(t *testing.T) { + m := testModule(t, "apply-ignore-changes-dep") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = func(i *InstanceInfo, s *InstanceState, c *ResourceConfig) (*InstanceDiff, error) { + switch i.Type { + case "aws_instance": + newAmi, _ := c.Get("ami") + return &InstanceDiff{ + Attributes: map[string]*ResourceAttrDiff{ + "ami": &ResourceAttrDiff{ + Old: s.Attributes["ami"], + New: newAmi.(string), + RequiresNew: true, + }, + }, + }, nil + case "aws_eip": + return testDiffFn(i, s, c) + default: + t.Fatalf("Unexpected type: %s", i.Type) + return nil, nil + } + } + s := &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo.0": &ResourceState{ + Primary: &InstanceState{ + ID: "i-abc123", + Attributes: map[string]string{ + "ami": "ami-abcd1234", + "id": "i-abc123", + }, + }, + }, + "aws_instance.foo.1": &ResourceState{ + Primary: &InstanceState{ + ID: "i-bcd234", + Attributes: map[string]string{ + "ami": "ami-abcd1234", + "id": "i-bcd234", + }, + }, + }, + "aws_eip.foo.0": &ResourceState{ + Primary: &InstanceState{ + ID: "eip-abc123", + Attributes: map[string]string{ + "id": "eip-abc123", + "instance": "i-abc123", + }, + }, + }, + "aws_eip.foo.1": &ResourceState{ + Primary: &InstanceState{ + ID: "eip-bcd234", + Attributes: map[string]string{ + "id": "eip-bcd234", + "instance": "i-bcd234", + }, + }, + }, + }, + }, + }, + } + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: s, + }) + + if p, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } else { + t.Logf(p.String()) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(state.String()) + expected := strings.TrimSpace(s.String()) if actual != expected { t.Fatalf("bad: \n%s", actual) } diff --git a/terraform/eval_diff.go b/terraform/eval_diff.go index 6cbd20e03..4a5027d60 100644 --- a/terraform/eval_diff.go +++ b/terraform/eval_diff.go @@ -3,6 +3,9 @@ package terraform import ( "fmt" "log" + "strings" + + "github.com/hashicorp/terraform/config" ) // EvalCompareDiff is an EvalNode implementation that compares two diffs @@ -73,6 +76,10 @@ type EvalDiff struct { State **InstanceState OutputDiff **InstanceDiff OutputState **InstanceState + + // Resource is needed to fetch the ignore_changes list so we can + // filter user-requested ignored attributes from the diff. 
+ Resource *config.Resource } // TODO: test @@ -132,6 +139,10 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { } } + if err := n.processIgnoreChanges(diff); err != nil { + return nil, err + } + // Call post-refresh hook err = ctx.Hook(func(h Hook) (HookAction, error) { return h.PostDiff(n.Info, diff) @@ -156,6 +167,86 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { return nil, nil } +func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { + if diff == nil || n.Resource == nil || n.Resource.Id() == "" { + return nil + } + ignoreChanges := n.Resource.Lifecycle.IgnoreChanges + + if len(ignoreChanges) == 0 { + return nil + } + + changeType := diff.ChangeType() + + // If we're just creating the resource, we shouldn't alter the + // Diff at all + if changeType == DiffCreate { + return nil + } + + ignorableAttrKeys := make(map[string]bool) + for _, ignoredKey := range ignoreChanges { + for k := range diff.Attributes { + if strings.HasPrefix(k, ignoredKey) { + ignorableAttrKeys[k] = true + } + } + } + + // If we are replacing the resource, then we expect there to be a bunch of + // extraneous attribute diffs we need to filter out for the other + // non-requires-new attributes going from "" -> "configval" or "" -> + // "". Filtering these out allows us to see if we might be able to + // skip this diff altogether. + if changeType == DiffDestroyCreate { + for k, v := range diff.Attributes { + if v.Empty() || v.NewComputed { + ignorableAttrKeys[k] = true + } + } + + // Here we emulate the implementation of diff.RequiresNew() with one small + // tweak, we ignore the "id" attribute diff that gets added by EvalDiff, + // since that was added in reaction to RequiresNew being true. + requiresNewAfterIgnores := false + for k, v := range diff.Attributes { + if k == "id" { + continue + } + if _, ok := ignorableAttrKeys[k]; ok { + continue + } + if v.RequiresNew == true { + requiresNewAfterIgnores = true + } + } + + // If we still require resource replacement after ignores, we + // can't touch the diff, as all of the attributes will be + // required to process the replacement. + if requiresNewAfterIgnores { + return nil + } + + // Here we undo the two reactions to RequireNew in EvalDiff - the "id" + // attribute diff and the Destroy boolean field + log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + + "because after ignore_changes, this diff no longer requires replacement") + delete(diff.Attributes, "id") + diff.Destroy = false + } + + // If we didn't hit any of our early exit conditions, we can filter the diff. + for k := range ignorableAttrKeys { + log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", + n.Resource.Id(), k) + delete(diff.Attributes, k) + } + + return nil +} + // EvalDiffDestroy is an EvalNode implementation that returns a plain // destroy diff. type EvalDiffDestroy struct { diff --git a/terraform/eval_ignore_changes.go b/terraform/eval_ignore_changes.go deleted file mode 100644 index 6c7222a8b..000000000 --- a/terraform/eval_ignore_changes.go +++ /dev/null @@ -1,87 +0,0 @@ -package terraform - -import ( - "log" - "strings" - - "github.com/hashicorp/terraform/config" -) - -// EvalIgnoreChanges is an EvalNode implementation that removes diff -// attributes if their name matches names provided by the resource's -// IgnoreChanges lifecycle. 
-type EvalIgnoreChanges struct { - Resource *config.Resource - Diff **InstanceDiff - WasChangeType *DiffChangeType -} - -func (n *EvalIgnoreChanges) Eval(ctx EvalContext) (interface{}, error) { - if n.Diff == nil || *n.Diff == nil || n.Resource == nil || n.Resource.Id() == "" { - return nil, nil - } - - diff := *n.Diff - ignoreChanges := n.Resource.Lifecycle.IgnoreChanges - - if len(ignoreChanges) == 0 { - return nil, nil - } - - changeType := diff.ChangeType() - // Let the passed in change type pointer override what the diff currently has. - if n.WasChangeType != nil && *n.WasChangeType != DiffInvalid { - changeType = *n.WasChangeType - } - - // If we're just creating the resource, we shouldn't alter the - // Diff at all - if changeType == DiffCreate { - return nil, nil - } - - for _, ignoredName := range ignoreChanges { - for name := range diff.Attributes { - if strings.HasPrefix(name, ignoredName) { - delete(diff.Attributes, name) - } - } - } - - // If we are replacing the resource, then we expect there to be a bunch of - // extraneous attribute diffs we need to filter out for the other - // non-requires-new attributes going from "" -> "configval" or "" -> - // "". Filtering these out allows us to see if we might be able to - // skip this diff altogether. - if changeType == DiffDestroyCreate { - for k, v := range diff.Attributes { - if v.Empty() || v.NewComputed { - delete(diff.Attributes, k) - } - } - - // Here we emulate the implementation of diff.RequiresNew() with one small - // tweak, we ignore the "id" attribute diff that gets added by EvalDiff, - // since that was added in reaction to RequiresNew being true. - requiresNewAfterIgnores := false - for k, v := range diff.Attributes { - if k == "id" { - continue - } - if v.RequiresNew == true { - requiresNewAfterIgnores = true - } - } - - // Here we undo the two reactions to RequireNew in EvalDiff - the "id" - // attribute diff and the Destroy boolean field - if !requiresNewAfterIgnores { - log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + - "because after ignore_changes, this diff no longer requires replacement") - delete(diff.Attributes, "id") - diff.Destroy = false - } - } - - return nil, nil -} diff --git a/terraform/test-fixtures/apply-ignore-changes-dep/main.tf b/terraform/test-fixtures/apply-ignore-changes-dep/main.tf new file mode 100644 index 000000000..301d2da27 --- /dev/null +++ b/terraform/test-fixtures/apply-ignore-changes-dep/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "foo" { + count = 2 + ami = "ami-bcd456" + lifecycle { + ignore_changes = ["ami"] + } +} + +resource "aws_eip" "foo" { + count = 2 + instance = "${element(aws_instance.foo.*.id, count.index)}" +} diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index 78f62be33..2ab485cde 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -345,6 +345,7 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, &EvalDiff{ Info: info, Config: &resourceConfig, + Resource: n.Resource, Provider: &provider, State: &state, OutputDiff: &diff, @@ -354,10 +355,6 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, Resource: n.Resource, Diff: &diff, }, - &EvalIgnoreChanges{ - Resource: n.Resource, - Diff: &diff, - }, &EvalWriteState{ Name: n.stateId(), ResourceType: n.Resource.Type, @@ -404,7 +401,6 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, var err error var createNew bool var 
createBeforeDestroyEnabled bool - var wasChangeType DiffChangeType nodes = append(nodes, &EvalOpFilter{ Ops: []walkOperation{walkApply, walkDestroy}, Node: &EvalSequence{ @@ -426,7 +422,6 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, return true, EvalEarlyExitError{} } - wasChangeType = diffApply.ChangeType() diffApply.Destroy = false return true, nil }, @@ -477,16 +472,12 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, &EvalDiff{ Info: info, Config: &resourceConfig, + Resource: n.Resource, Provider: &provider, Diff: &diffApply, State: &state, OutputDiff: &diffApply, }, - &EvalIgnoreChanges{ - Resource: n.Resource, - Diff: &diffApply, - WasChangeType: &wasChangeType, - }, // Get the saved diff &EvalReadDiff{ From 82d6d4a69144ce781aec1058ddc73a0aeb18b69a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 9 Jul 2016 10:53:54 +0100 Subject: [PATCH 0195/1238] aws/docs: Fix Elastic Transcoder docs --- .../aws/r/elastic_transcoder_pipeline.html.markdown | 8 +++----- .../aws/r/elastic_transcoder_preset.html.markdown | 4 +--- website/source/layouts/aws.erb | 4 ++-- 3 files changed, 6 insertions(+), 10 deletions(-) diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown index 0e6bd9cf0..a3b7bd983 100644 --- a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown @@ -1,7 +1,7 @@ --- layout: "aws" page_title: "AWS: aws_elastictranscoder_pipeline" -sidebar_current: "docs-aws-resource-elastic-transcoder" +sidebar_current: "docs-aws-resource-elastic-transcoder-pipeline" description: |- Provides an Elastic Transcoder pipeline resource. --- @@ -12,8 +12,6 @@ Provides an Elastic Transcoder pipeline resource. ## Example Usage -### Elastic Transcoder Pipeline - ``` resource "aws_elastictranscoder_pipeline" "bar" { input_bucket = "${aws_s3_bucket.input_bucket.bucket}" @@ -34,7 +32,7 @@ resource "aws_elastictranscoder_pipeline" "bar" { ## Argument Reference -See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference. +See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference. The following arguments are supported: @@ -54,7 +52,7 @@ which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, and the storage class that you want to assign to the files. If you specify values for `content_config`, you must also specify values for `thumbnail_config`. If you specify values for `content_config` and -`thumbnail_config`, omit the `output_bucket` object. +`thumbnail_config`, omit the `output_bucket` object. 
The `content_config` object supports the following: diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown index f34c34df6..128a4da6c 100644 --- a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown @@ -1,7 +1,7 @@ --- layout: "aws" page_title: "AWS: aws_elastictranscoder_preset" -sidebar_current: "docs-aws-resource-elastic-transcoder" +sidebar_current: "docs-aws-resource-elastic-transcoder-preset" description: |- Provides an Elastic Transcoder preset resource. --- @@ -12,8 +12,6 @@ Provides an Elastic Transcoder preset resource. ## Example Usage -### Elastic Transcoder Preset - ``` resource "aws_elastictranscoder_preset" "bar" { container = "mp4" diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index d120c1c16..75dd6410d 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -376,11 +376,11 @@ From 01b972b1d68c5d1ff472838c773c57297070fc66 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 9 Jul 2016 11:09:10 +0100 Subject: [PATCH 0196/1238] docs/aws: Promote aws_ami data source more --- .../providers/aws/r/instance.html.markdown | 19 ++++++-- .../aws/r/launch_configuration.html.markdown | 45 +++++++++++++++++-- 2 files changed, 58 insertions(+), 6 deletions(-) diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index 92409aff4..227a5f3d5 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -14,14 +14,27 @@ and deleted. Instances also support [provisioning](/docs/provisioners/index.html ## Example Usage ``` -# Create a new instance of the `ami-408c7f28` (Ubuntu 14.04) on an +# Create a new instance of the latest Ubuntu 14.04 on an # t1.micro node with an AWS Tag naming it "HelloWorld" provider "aws" { region = "us-east-1" } - + +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["paravirtual"] + } + owners = ["099720109477"] # Canonical +} + resource "aws_instance" "web" { - ami = "ami-408c7f28" + ami = "${data.aws_ami.ubuntu.id}" instance_type = "t1.micro" tags { Name = "HelloWorld" diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index dfe85aa18..d6e46ac84 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -13,9 +13,22 @@ Provides a resource to create a new launch configuration, used for autoscaling g ## Example Usage ``` +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["paravirtual"] + } + owners = ["099720109477"] # Canonical +} + resource "aws_launch_configuration" "as_conf" { name = "web_config" - image_id = "ami-408c7f28" + image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t1.micro" } ``` @@ -31,9 +44,22 @@ Either omit the Launch Configuration `name` attribute, or specify a partial name with `name_prefix`. 
Example: ``` +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["paravirtual"] + } + owners = ["099720109477"] # Canonical +} + resource "aws_launch_configuration" "as_conf" { name_prefix = "terraform-lc-example-" - image_id = "ami-408c7f28" + image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t1.micro" lifecycle { @@ -65,8 +91,21 @@ documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-in for more information or how to launch [Spot Instances][3] with Terraform. ``` +data "aws_ami" "ubuntu" { + most_recent = true + filter { + name = "name" + values = ["ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*"] + } + filter { + name = "virtualization-type" + values = ["paravirtual"] + } + owners = ["099720109477"] # Canonical +} + resource "aws_launch_configuration" "as_conf" { - image_id = "ami-408c7f28" + image_id = "${data.aws_ami.ubuntu.id}" instance_type = "t1.micro" spot_price = "0.001" lifecycle { From f78f0434cd66808b92b1df08850f22cf9de025ac Mon Sep 17 00:00:00 2001 From: stack72 Date: Sun, 10 Jul 2016 22:33:02 +0100 Subject: [PATCH 0197/1238] provider/azurerm: Support Import for `azurerm_availability_set` ``` % make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMAvailabilitySet_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMAvailabilitySet_ -timeout 120m === RUN TestAccAzureRMAvailabilitySet_importBasic --- PASS: TestAccAzureRMAvailabilitySet_importBasic (150.75s) === RUN TestAccAzureRMAvailabilitySet_basic --- PASS: TestAccAzureRMAvailabilitySet_basic (103.37s) === RUN TestAccAzureRMAvailabilitySet_withTags --- PASS: TestAccAzureRMAvailabilitySet_withTags (137.65s) === RUN TestAccAzureRMAvailabilitySet_withDomainCounts --- PASS: TestAccAzureRMAvailabilitySet_withDomainCounts (88.78s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 480.564s ``` --- .../import_arm_availability_set_test.go | 36 +++++++++++++++++++ .../azurerm/resource_arm_availability_set.go | 5 +++ 2 files changed, 41 insertions(+) create mode 100644 builtin/providers/azurerm/import_arm_availability_set_test.go diff --git a/builtin/providers/azurerm/import_arm_availability_set_test.go b/builtin/providers/azurerm/import_arm_availability_set_test.go new file mode 100644 index 000000000..2b6cef679 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_availability_set_test.go @@ -0,0 +1,36 @@ +package azurerm + +import ( + "testing" + + "fmt" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMAvailabilitySet_importBasic(t *testing.T) { + resourceName := "azurerm_availability_set.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVAvailabilitySet_basic, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMAvailabilitySetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_group_name"}, + //this isn't returned from the API! 
+ }, + }, + }) +} diff --git a/builtin/providers/azurerm/resource_arm_availability_set.go b/builtin/providers/azurerm/resource_arm_availability_set.go index b9baaae2e..fd6edf766 100644 --- a/builtin/providers/azurerm/resource_arm_availability_set.go +++ b/builtin/providers/azurerm/resource_arm_availability_set.go @@ -16,6 +16,9 @@ func resourceArmAvailabilitySet() *schema.Resource { Read: resourceArmAvailabilitySetRead, Update: resourceArmAvailabilitySetCreate, Delete: resourceArmAvailabilitySetDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -125,6 +128,8 @@ func resourceArmAvailabilitySetRead(d *schema.ResourceData, meta interface{}) er availSet := *resp.Properties d.Set("platform_update_domain_count", availSet.PlatformUpdateDomainCount) d.Set("platform_fault_domain_count", availSet.PlatformFaultDomainCount) + d.Set("name", resp.Name) + d.Set("location", resp.Location) flattenAndSetTags(d, resp.Tags) From 95c0a74df7d0b0502486331703e3eed2abefb210 Mon Sep 17 00:00:00 2001 From: dkalleg Date: Sun, 10 Jul 2016 14:36:59 -0700 Subject: [PATCH 0198/1238] Adding disk keep_on_remove support on delete case (#7169) This both fixes the keep_on_remove reference in the disk update case as well as adds logic to safely eject disks in the destroy case. --- .../resource_vsphere_virtual_machine.go | 29 +++++- .../resource_vsphere_virtual_machine_test.go | 96 +++++++++++++++++++ 2 files changed, 123 insertions(+), 2 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 30bb8924d..4998d63a4 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -509,8 +509,8 @@ func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{ virtualDisk := devices.FindByKey(int32(disk["key"].(int))) keep := false - if v, ok := d.GetOk("keep_on_remove"); ok { - keep = v.(bool) + if v, ok := disk["keep_on_remove"].(bool); ok { + keep = v } err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) @@ -1093,6 +1093,11 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ if err != nil { return err } + devices, err := vm.Device(context.TODO()) + if err != nil { + log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) + return err + } log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) state, err := vm.PowerState(context.TODO()) @@ -1112,6 +1117,26 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ } } + // Safely eject any disks the user marked as keep_on_remove + if vL, ok := d.GetOk("disk"); ok { + if diskSet, ok := vL.(*schema.Set); ok { + + for _, value := range diskSet.List() { + disk := value.(map[string]interface{}) + + if v, ok := disk["keep_on_remove"].(bool); ok && v == true { + log.Printf("[DEBUG] not destroying %v", disk["name"]) + virtualDisk := devices.FindByKey(int32(disk["key"].(int))) + err = vm.RemoveDevice(context.TODO(), true, virtualDisk) + if err != nil { + log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) + return err + } + } + } + } + } + task, err := vm.Destroy(context.TODO()) if err != nil { return err diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index a69c80567..4ebeab093 100644 
--- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -1154,3 +1154,99 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou return nil } } + +const testAccCheckVSphereVirtualMachineConfig_keepOnRemove = ` +resource "vsphere_virtual_machine" "keep_disk" { + name = "terraform-test" +` + testAccTemplateBasicBody + ` + disk { + size = 1 + iops = 500 + controller_type = "scsi" + name = "one" + keep_on_remove = true + } +} +` + +func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { + var vm virtualMachine + basic_vars := setupTemplateBasicBodyVars() + config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_keepOnRemove) + var datastore string + if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { + datastore = v + } + var datacenter string + if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { + datacenter = v + } + + vmName := "vsphere_virtual_machine.keep_disk" + test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "2"}.testCheckFuncBasic() + + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_keepOnRemove) + log.Printf("[DEBUG] template config= %s", config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + ), + }, + resource.TestStep{ + Config: " ", + Check: checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk"), + }, + }, + }) +} + +func checkForDisk(datacenter string, datastore string, vmName string, path string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*govmomi.Client) + finder := find.NewFinder(client.Client, true) + + dc, err := getDatacenter(client, datacenter) + if err != nil { + return err + } + finder.SetDatacenter(dc) + + ds, err := finder.Datastore(context.TODO(), datastore) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't find Datastore '%v': %v", datastore, err) + return err + } + + diskPath := vmName + "/" + path + + _, err = ds.Stat(context.TODO(), diskPath) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't stat file '%v': %v", diskPath, err) + return err + } + + // Cleanup + fileManager := object.NewFileManager(client.Client) + task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err) + return err + } + + _, err = task.WaitForResult(context.TODO(), nil) + if err != nil { + log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err) + return err + } + + return nil + } +} From 1845fd8c928547e421a4aabaab795f84dc6e59d7 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 10 Jul 2016 22:39:18 +0100 Subject: [PATCH 0199/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index beaab6ddf..fa8a7660f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -136,6 +136,7 @@ IMPROVEMENTS: * provider/vsphere: Add DiskEnableUUID option to 
`vsphere_virtual_machine` [GH-7088] * provider/vsphere: Virtual Machine and File resources handle Read errors properley [GH-7220] * provider/vsphere: set uuid as `vsphere_virtual_machine` output [GH-4382] + * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` [GH-7169] * provisioner/file: File provisioners may now have file content set as an attribute [GH-7561] BUG FIXES: From d70b9d334b64e8e1fdfa115d4c7055ed68d57fc8 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 00:50:09 +0300 Subject: [PATCH 0200/1238] Revert "Adding disk keep_on_remove support on delete case" (#7572) --- .../resource_vsphere_virtual_machine.go | 29 +----- .../resource_vsphere_virtual_machine_test.go | 96 ------------------- 2 files changed, 2 insertions(+), 123 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 4998d63a4..30bb8924d 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -509,8 +509,8 @@ func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{ virtualDisk := devices.FindByKey(int32(disk["key"].(int))) keep := false - if v, ok := disk["keep_on_remove"].(bool); ok { - keep = v + if v, ok := d.GetOk("keep_on_remove"); ok { + keep = v.(bool) } err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) @@ -1093,11 +1093,6 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ if err != nil { return err } - devices, err := vm.Device(context.TODO()) - if err != nil { - log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) - return err - } log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) state, err := vm.PowerState(context.TODO()) @@ -1117,26 +1112,6 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ } } - // Safely eject any disks the user marked as keep_on_remove - if vL, ok := d.GetOk("disk"); ok { - if diskSet, ok := vL.(*schema.Set); ok { - - for _, value := range diskSet.List() { - disk := value.(map[string]interface{}) - - if v, ok := disk["keep_on_remove"].(bool); ok && v == true { - log.Printf("[DEBUG] not destroying %v", disk["name"]) - virtualDisk := devices.FindByKey(int32(disk["key"].(int))) - err = vm.RemoveDevice(context.TODO(), true, virtualDisk) - if err != nil { - log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) - return err - } - } - } - } - } - task, err := vm.Destroy(context.TODO()) if err != nil { return err diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index 4ebeab093..a69c80567 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -1154,99 +1154,3 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou return nil } } - -const testAccCheckVSphereVirtualMachineConfig_keepOnRemove = ` -resource "vsphere_virtual_machine" "keep_disk" { - name = "terraform-test" -` + testAccTemplateBasicBody + ` - disk { - size = 1 - iops = 500 - controller_type = "scsi" - name = "one" - keep_on_remove = true - } -} -` - -func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { - var vm virtualMachine - basic_vars := setupTemplateBasicBodyVars() - config := 
basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_keepOnRemove) - var datastore string - if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { - datastore = v - } - var datacenter string - if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { - datacenter = v - } - - vmName := "vsphere_virtual_machine.keep_disk" - test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "2"}.testCheckFuncBasic() - - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_keepOnRemove) - log.Printf("[DEBUG] template config= %s", config) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: config, - Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label, - ), - }, - resource.TestStep{ - Config: " ", - Check: checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk"), - }, - }, - }) -} - -func checkForDisk(datacenter string, datastore string, vmName string, path string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client := testAccProvider.Meta().(*govmomi.Client) - finder := find.NewFinder(client.Client, true) - - dc, err := getDatacenter(client, datacenter) - if err != nil { - return err - } - finder.SetDatacenter(dc) - - ds, err := finder.Datastore(context.TODO(), datastore) - if err != nil { - log.Printf("[ERROR] checkForDisk - Couldn't find Datastore '%v': %v", datastore, err) - return err - } - - diskPath := vmName + "/" + path - - _, err = ds.Stat(context.TODO(), diskPath) - if err != nil { - log.Printf("[ERROR] checkForDisk - Couldn't stat file '%v': %v", diskPath, err) - return err - } - - // Cleanup - fileManager := object.NewFileManager(client.Client) - task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc) - if err != nil { - log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err) - return err - } - - _, err = task.WaitForResult(context.TODO(), nil) - if err != nil { - log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err) - return err - } - - return nil - } -} From 0c6856f85c1703f0c0b9191b0cb111640e86b5bc Mon Sep 17 00:00:00 2001 From: Tommy Murphy Date: Mon, 11 Jul 2016 07:09:06 -0400 Subject: [PATCH 0201/1238] digitalocean tag support (#7500) * vendor: update godo to support tags * digitalocean: introduce tag resource * website: update for digitalocean_tag resource --- builtin/providers/digitalocean/provider.go | 1 + .../resource_digitalocean_droplet.go | 21 ++ .../resource_digitalocean_droplet_test.go | 55 ++++ .../digitalocean/resource_digitalocean_tag.go | 104 ++++++++ .../resource_digitalocean_tag_test.go | 93 +++++++ builtin/providers/digitalocean/tags.go | 72 +++++ builtin/providers/digitalocean/tags_test.go | 51 ++++ .../github.com/digitalocean/godo/.travis.yml | 6 - .../digitalocean/godo/droplet_actions.go | 96 +++++++ .../github.com/digitalocean/godo/droplets.go | 145 ++++++++-- vendor/github.com/digitalocean/godo/godo.go | 59 +++- .../github.com/digitalocean/godo/storage.go | 252 ++++++++++++++++++ .../digitalocean/godo/storage_actions.go | 61 +++++ vendor/github.com/digitalocean/godo/tags.go | 226 ++++++++++++++++ vendor/vendor.json | 4 +- 
.../docs/providers/do/r/droplet.html.markdown | 4 +- .../docs/providers/do/r/tag.html.markdown | 36 +++ 17 files changed, 1253 insertions(+), 33 deletions(-) create mode 100644 builtin/providers/digitalocean/resource_digitalocean_tag.go create mode 100644 builtin/providers/digitalocean/resource_digitalocean_tag_test.go create mode 100644 builtin/providers/digitalocean/tags.go create mode 100644 builtin/providers/digitalocean/tags_test.go delete mode 100644 vendor/github.com/digitalocean/godo/.travis.yml create mode 100644 vendor/github.com/digitalocean/godo/storage.go create mode 100644 vendor/github.com/digitalocean/godo/storage_actions.go create mode 100644 vendor/github.com/digitalocean/godo/tags.go create mode 100644 website/source/docs/providers/do/r/tag.html.markdown diff --git a/builtin/providers/digitalocean/provider.go b/builtin/providers/digitalocean/provider.go index be197a32f..3e8771212 100644 --- a/builtin/providers/digitalocean/provider.go +++ b/builtin/providers/digitalocean/provider.go @@ -23,6 +23,7 @@ func Provider() terraform.ResourceProvider { "digitalocean_floating_ip": resourceDigitalOceanFloatingIp(), "digitalocean_record": resourceDigitalOceanRecord(), "digitalocean_ssh_key": resourceDigitalOceanSSHKey(), + "digitalocean_tag": resourceDigitalOceanTag(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet.go b/builtin/providers/digitalocean/resource_digitalocean_droplet.go index 2e07e0082..ae22b9131 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet.go @@ -104,6 +104,12 @@ func resourceDigitalOceanDroplet() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "user_data": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -181,6 +187,12 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) "Error waiting for droplet (%s) to become ready: %s", d.Id(), err) } + // droplet needs to be active in order to set tags + err = setTags(client, d) + if err != nil { + return fmt.Errorf("Error setting tags: %s", err) + } + return resourceDigitalOceanDropletRead(d, meta) } @@ -236,6 +248,8 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e "host": findIPv4AddrByType(droplet, "public"), }) + d.Set("tags", droplet.Tags) + return nil } @@ -379,6 +393,13 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("tags") { + err = setTags(client, d) + if err != nil { + return fmt.Errorf("Error updating tags: %s", err) + } + } + return resourceDigitalOceanDropletRead(d, meta) } diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index 3a72e3c5d..23485cfd6 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -103,6 +103,40 @@ func TestAccDigitalOceanDroplet_UpdateUserData(t *testing.T) { }) } +func TestAccDigitalOceanDroplet_UpdateTags(t *testing.T) { + var afterCreate, afterUpdate godo.Droplet + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDigitalOceanDropletDestroy, + Steps: 
[]resource.TestStep{ + resource.TestStep{ + Config: testAccCheckDigitalOceanDropletConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterCreate), + testAccCheckDigitalOceanDropletAttributes(&afterCreate), + ), + }, + + resource.TestStep{ + Config: testAccCheckDigitalOceanDropletConfig_tag_update, + Check: resource.ComposeTestCheckFunc( + testAccCheckDigitalOceanDropletExists("digitalocean_droplet.foobar", &afterUpdate), + resource.TestCheckResourceAttr( + "digitalocean_droplet.foobar", + "tags.#", + "1"), + resource.TestCheckResourceAttr( + "digitalocean_droplet.foobar", + "tags.0", + "barbaz"), + ), + }, + }, + }) +} + func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) { var droplet godo.Droplet @@ -309,6 +343,27 @@ resource "digitalocean_droplet" "foobar" { } `, testAccValidPublicKey) +var testAccCheckDigitalOceanDropletConfig_tag_update = fmt.Sprintf(` +resource "digitalocean_tag" "barbaz" { + name = "barbaz" +} + +resource "digitalocean_ssh_key" "foobar" { + name = "foobar" + public_key = "%s" +} + +resource "digitalocean_droplet" "foobar" { + name = "foo" + size = "512mb" + image = "centos-5-8-x32" + region = "nyc3" + user_data = "foobar" + ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] + tags = ["${digitalocean_tag.barbaz.id}"] +} +`, testAccValidPublicKey) + var testAccCheckDigitalOceanDropletConfig_userdata_update = fmt.Sprintf(` resource "digitalocean_ssh_key" "foobar" { name = "foobar" diff --git a/builtin/providers/digitalocean/resource_digitalocean_tag.go b/builtin/providers/digitalocean/resource_digitalocean_tag.go new file mode 100644 index 000000000..2980d29e8 --- /dev/null +++ b/builtin/providers/digitalocean/resource_digitalocean_tag.go @@ -0,0 +1,104 @@ +package digitalocean + +import ( + "fmt" + "log" + + "github.com/digitalocean/godo" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDigitalOceanTag() *schema.Resource { + return &schema.Resource{ + Create: resourceDigitalOceanTagCreate, + Read: resourceDigitalOceanTagRead, + Update: resourceDigitalOceanTagUpdate, + Delete: resourceDigitalOceanTagDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceDigitalOceanTagCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*godo.Client) + + // Build up our creation options + opts := &godo.TagCreateRequest{ + Name: d.Get("name").(string), + } + + log.Printf("[DEBUG] Tag create configuration: %#v", opts) + tag, _, err := client.Tags.Create(opts) + if err != nil { + return fmt.Errorf("Error creating tag: %s", err) + } + + d.SetId(tag.Name) + log.Printf("[INFO] Tag: %s", tag.Name) + + return resourceDigitalOceanTagRead(d, meta) +} + +func resourceDigitalOceanTagRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*godo.Client) + + tag, resp, err := client.Tags.Get(d.Id()) + if err != nil { + // If the tag is somehow already destroyed, mark as + // successfully gone + if resp != nil && resp.StatusCode == 404 { + d.SetId("") + return nil + } + + return fmt.Errorf("Error retrieving tag: %s", err) + } + + d.Set("name", tag.Name) + + return nil +} + +func resourceDigitalOceanTagUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*godo.Client) + + var newName string + if v, ok := d.GetOk("name"); ok { + newName = v.(string) + } + + 
log.Printf("[DEBUG] tag update name: %#v", newName) + opts := &godo.TagUpdateRequest{ + Name: newName, + } + + _, err := client.Tags.Update(d.Id(), opts) + if err != nil { + return fmt.Errorf("Failed to update tag: %s", err) + } + + d.Set("name", newName) + + return resourceDigitalOceanTagRead(d, meta) +} + +func resourceDigitalOceanTagDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*godo.Client) + + log.Printf("[INFO] Deleting tag: %s", d.Id()) + _, err := client.Tags.Delete(d.Id()) + if err != nil { + return fmt.Errorf("Error deleting tag: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/digitalocean/resource_digitalocean_tag_test.go b/builtin/providers/digitalocean/resource_digitalocean_tag_test.go new file mode 100644 index 000000000..932c3d4c7 --- /dev/null +++ b/builtin/providers/digitalocean/resource_digitalocean_tag_test.go @@ -0,0 +1,93 @@ +package digitalocean + +import ( + "fmt" + "testing" + + "github.com/digitalocean/godo" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDigitalOceanTag_Basic(t *testing.T) { + var tag godo.Tag + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDigitalOceanTagDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckDigitalOceanTagConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckDigitalOceanTagExists("digitalocean_tag.foobar", &tag), + testAccCheckDigitalOceanTagAttributes(&tag), + resource.TestCheckResourceAttr( + "digitalocean_tag.foobar", "name", "foobar"), + ), + }, + }, + }) +} + +func testAccCheckDigitalOceanTagDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*godo.Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "digitalocean_tag" { + continue + } + + // Try to find the key + _, _, err := client.Tags.Get(rs.Primary.ID) + + if err == nil { + return fmt.Errorf("Tag still exists") + } + } + + return nil +} + +func testAccCheckDigitalOceanTagAttributes(tag *godo.Tag) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if tag.Name != "foobar" { + return fmt.Errorf("Bad name: %s", tag.Name) + } + + return nil + } +} + +func testAccCheckDigitalOceanTagExists(n string, tag *godo.Tag) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Record ID is set") + } + + client := testAccProvider.Meta().(*godo.Client) + + // Try to find the tag + foundTag, _, err := client.Tags.Get(rs.Primary.ID) + + if err != nil { + return err + } + + *tag = *foundTag + + return nil + } +} + +var testAccCheckDigitalOceanTagConfig_basic = fmt.Sprintf(` +resource "digitalocean_tag" "foobar" { + name = "foobar" +}`) diff --git a/builtin/providers/digitalocean/tags.go b/builtin/providers/digitalocean/tags.go new file mode 100644 index 000000000..6e952cef4 --- /dev/null +++ b/builtin/providers/digitalocean/tags.go @@ -0,0 +1,72 @@ +package digitalocean + +import ( + "log" + + "github.com/digitalocean/godo" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTags(conn *godo.Client, d *schema.ResourceData) error { + oraw, nraw := d.GetChange("tags") + remove, create := diffTags(tagsFromSchema(oraw), tagsFromSchema(nraw)) + + log.Printf("[DEBUG] Removing tags: %#v from %s", remove, d.Id()) + for _, tag := range remove { + _, err := conn.Tags.UntagResources(tag, &godo.UntagResourcesRequest{ + Resources: []godo.Resource{ + godo.Resource{ + ID: d.Id(), + Type: godo.DropletResourceType, + }, + }, + }) + if err != nil { + return err + } + } + + log.Printf("[DEBUG] Creating tags: %s for %s", create, d.Id()) + for _, tag := range create { + _, err := conn.Tags.TagResources(tag, &godo.TagResourcesRequest{ + Resources: []godo.Resource{ + godo.Resource{ + ID: d.Id(), + Type: godo.DropletResourceType, + }, + }, + }) + if err != nil { + return err + } + } + + return nil +} + +// tagsFromSchema takes the raw schema tags and returns them as a +// properly asserted map[string]string +func tagsFromSchema(raw interface{}) map[string]string { + result := make(map[string]string) + for _, t := range raw.([]interface{}) { + result[t.(string)] = t.(string) + } + + return result +} + +// diffTags takes the old and the new tag sets and returns the difference of +// both. The remaining tags are those that need to be removed and created +func diffTags(oldTags, newTags map[string]string) (map[string]string, map[string]string) { + for k := range oldTags { + _, ok := newTags[k] + if ok { + delete(newTags, k) + delete(oldTags, k) + } + } + + return oldTags, newTags +} diff --git a/builtin/providers/digitalocean/tags_test.go b/builtin/providers/digitalocean/tags_test.go new file mode 100644 index 000000000..02686ed2d --- /dev/null +++ b/builtin/providers/digitalocean/tags_test.go @@ -0,0 +1,51 @@ +package digitalocean + +import ( + "reflect" + "testing" +) + +func TestDiffTags(t *testing.T) { + cases := []struct { + Old, New []interface{} + Create, Remove map[string]string + }{ + // Basic add/remove + { + Old: []interface{}{ + "foo", + }, + New: []interface{}{ + "bar", + }, + Create: map[string]string{ + "bar": "bar", + }, + Remove: map[string]string{ + "foo": "foo", + }, + }, + + // Noop + { + Old: []interface{}{ + "foo", + }, + New: []interface{}{ + "foo", + }, + Create: map[string]string{}, + Remove: map[string]string{}, + }, + } + + for i, tc := range cases { + r, c := diffTags(tagsFromSchema(tc.Old), tagsFromSchema(tc.New)) + if !reflect.DeepEqual(r, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, r) + } + if !reflect.DeepEqual(c, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, c) + } + } +} diff --git a/vendor/github.com/digitalocean/godo/.travis.yml b/vendor/github.com/digitalocean/godo/.travis.yml deleted file mode 100644 index 245a2f517..000000000 --- a/vendor/github.com/digitalocean/godo/.travis.yml +++ /dev/null @@ -1,6 +0,0 @@ -language: go - -go: - - 1.3 - - 1.4 - - tip diff --git a/vendor/github.com/digitalocean/godo/droplet_actions.go b/vendor/github.com/digitalocean/godo/droplet_actions.go index 7012aee7f..c01ba36a0 100644 --- a/vendor/github.com/digitalocean/godo/droplet_actions.go +++ b/vendor/github.com/digitalocean/godo/droplet_actions.go @@ -13,22 +13,31 @@ type ActionRequest map[string]interface{} // See: https://developers.digitalocean.com/documentation/v2#droplet-actions type DropletActionsService interface { Shutdown(int) (*Action, *Response, error) + ShutdownByTag(string) (*Action, *Response, error) PowerOff(int) (*Action, *Response, error) + PowerOffByTag(string) (*Action, *Response, 
error) PowerOn(int) (*Action, *Response, error) + PowerOnByTag(string) (*Action, *Response, error) PowerCycle(int) (*Action, *Response, error) + PowerCycleByTag(string) (*Action, *Response, error) Reboot(int) (*Action, *Response, error) Restore(int, int) (*Action, *Response, error) Resize(int, string, bool) (*Action, *Response, error) Rename(int, string) (*Action, *Response, error) Snapshot(int, string) (*Action, *Response, error) + SnapshotByTag(string, string) (*Action, *Response, error) EnableBackups(int) (*Action, *Response, error) + EnableBackupsByTag(string) (*Action, *Response, error) DisableBackups(int) (*Action, *Response, error) + DisableBackupsByTag(string) (*Action, *Response, error) PasswordReset(int) (*Action, *Response, error) RebuildByImageID(int, int) (*Action, *Response, error) RebuildByImageSlug(int, string) (*Action, *Response, error) ChangeKernel(int, int) (*Action, *Response, error) EnableIPv6(int) (*Action, *Response, error) + EnableIPv6ByTag(string) (*Action, *Response, error) EnablePrivateNetworking(int) (*Action, *Response, error) + EnablePrivateNetworkingByTag(string) (*Action, *Response, error) Upgrade(int) (*Action, *Response, error) Get(int, int) (*Action, *Response, error) GetByURI(string) (*Action, *Response, error) @@ -48,24 +57,48 @@ func (s *DropletActionsServiceOp) Shutdown(id int) (*Action, *Response, error) { return s.doAction(id, request) } +// Shutdown Droplets by Tag +func (s *DropletActionsServiceOp) ShutdownByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "shutdown"} + return s.doActionByTag(tag, request) +} + // PowerOff a Droplet func (s *DropletActionsServiceOp) PowerOff(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "power_off"} return s.doAction(id, request) } +// PowerOff a Droplet by Tag +func (s *DropletActionsServiceOp) PowerOffByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_off"} + return s.doActionByTag(tag, request) +} + // PowerOn a Droplet func (s *DropletActionsServiceOp) PowerOn(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "power_on"} return s.doAction(id, request) } +// PowerOn a Droplet by Tag +func (s *DropletActionsServiceOp) PowerOnByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_on"} + return s.doActionByTag(tag, request) +} + // PowerCycle a Droplet func (s *DropletActionsServiceOp) PowerCycle(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "power_cycle"} return s.doAction(id, request) } +// PowerCycle a Droplet by Tag +func (s *DropletActionsServiceOp) PowerCycleByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "power_cycle"} + return s.doActionByTag(tag, request) +} + // Reboot a Droplet func (s *DropletActionsServiceOp) Reboot(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "reboot"} @@ -113,18 +146,40 @@ func (s *DropletActionsServiceOp) Snapshot(id int, name string) (*Action, *Respo return s.doAction(id, request) } +// Snapshot a Droplet by Tag +func (s *DropletActionsServiceOp) SnapshotByTag(tag string, name string) (*Action, *Response, error) { + requestType := "snapshot" + request := &ActionRequest{ + "type": requestType, + "name": name, + } + return s.doActionByTag(tag, request) +} + // EnableBackups enables backups for a droplet. 
func (s *DropletActionsServiceOp) EnableBackups(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "enable_backups"} return s.doAction(id, request) } +// EnableBackups enables backups for a droplet by Tag +func (s *DropletActionsServiceOp) EnableBackupsByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_backups"} + return s.doActionByTag(tag, request) +} + // DisableBackups disables backups for a droplet. func (s *DropletActionsServiceOp) DisableBackups(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "disable_backups"} return s.doAction(id, request) } +// DisableBackups disables backups for a droplet by tag +func (s *DropletActionsServiceOp) DisableBackupsByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "disable_backups"} + return s.doActionByTag(tag, request) +} + // PasswordReset resets the password for a droplet. func (s *DropletActionsServiceOp) PasswordReset(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "password_reset"} @@ -155,12 +210,24 @@ func (s *DropletActionsServiceOp) EnableIPv6(id int) (*Action, *Response, error) return s.doAction(id, request) } +// EnableIPv6 enables IPv6 for a droplet by Tag +func (s *DropletActionsServiceOp) EnableIPv6ByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_ipv6"} + return s.doActionByTag(tag, request) +} + // EnablePrivateNetworking enables private networking for a droplet. func (s *DropletActionsServiceOp) EnablePrivateNetworking(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "enable_private_networking"} return s.doAction(id, request) } +// EnablePrivateNetworking enables private networking for a droplet by Tag +func (s *DropletActionsServiceOp) EnablePrivateNetworkingByTag(tag string) (*Action, *Response, error) { + request := &ActionRequest{"type": "enable_private_networking"} + return s.doActionByTag(tag, request) +} + // Upgrade a droplet. func (s *DropletActionsServiceOp) Upgrade(id int) (*Action, *Response, error) { request := &ActionRequest{"type": "upgrade"} @@ -192,6 +259,31 @@ func (s *DropletActionsServiceOp) doAction(id int, request *ActionRequest) (*Act return &root.Event, resp, err } +func (s *DropletActionsServiceOp) doActionByTag(tag string, request *ActionRequest) (*Action, *Response, error) { + if tag == "" { + return nil, nil, NewArgError("tag", "cannot be empty") + } + + if request == nil { + return nil, nil, NewArgError("request", "request can't be nil") + } + + path := dropletActionPathByTag(tag) + + req, err := s.client.NewRequest("POST", path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + // Get an action for a particular droplet by id. 
func (s *DropletActionsServiceOp) Get(dropletID, actionID int) (*Action, *Response, error) { if dropletID < 1 { @@ -236,3 +328,7 @@ func (s *DropletActionsServiceOp) get(path string) (*Action, *Response, error) { func dropletActionPath(dropletID int) string { return fmt.Sprintf("v2/droplets/%d/actions", dropletID) } + +func dropletActionPathByTag(tag string) string { + return fmt.Sprintf("v2/droplets/actions?tag_name=%s", tag) +} diff --git a/vendor/github.com/digitalocean/godo/droplets.go b/vendor/github.com/digitalocean/godo/droplets.go index 978d41e39..c17bc3bc5 100644 --- a/vendor/github.com/digitalocean/godo/droplets.go +++ b/vendor/github.com/digitalocean/godo/droplets.go @@ -2,20 +2,25 @@ package godo import ( "encoding/json" + "errors" "fmt" ) const dropletBasePath = "v2/droplets" +var errNoNetworks = errors.New("no networks have been defined") + // DropletsService is an interface for interfacing with the droplet // endpoints of the DigitalOcean API // See: https://developers.digitalocean.com/documentation/v2#droplets type DropletsService interface { List(*ListOptions) ([]Droplet, *Response, error) + ListByTag(string, *ListOptions) ([]Droplet, *Response, error) Get(int) (*Droplet, *Response, error) Create(*DropletCreateRequest) (*Droplet, *Response, error) CreateMultiple(*DropletMultiCreateRequest) ([]Droplet, *Response, error) Delete(int) (*Response, error) + DeleteByTag(string) (*Response, error) Kernels(int, *ListOptions) ([]Kernel, *Response, error) Snapshots(int, *ListOptions) ([]Image, *Response, error) Backups(int, *ListOptions) ([]Image, *Response, error) @@ -47,9 +52,55 @@ type Droplet struct { Locked bool `json:"locked,bool,omitempty"` Status string `json:"status,omitempty"` Networks *Networks `json:"networks,omitempty"` - ActionIDs []int `json:"action_ids,omitempty"` Created string `json:"created_at,omitempty"` - Kernel *Kernel `json:"kernel, omitempty"` + Kernel *Kernel `json:"kernel,omitempty"` + Tags []string `json:"tags,omitempty"` + VolumeIDs []string `json:"volumes"` +} + +// PublicIPv4 returns the public IPv4 address for the Droplet. +func (d *Droplet) PublicIPv4() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v4 := range d.Networks.V4 { + if v4.Type == "public" { + return v4.IPAddress, nil + } + } + + return "", nil +} + +// PrivateIPv4 returns the private IPv4 address for the Droplet. +func (d *Droplet) PrivateIPv4() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v4 := range d.Networks.V4 { + if v4.Type == "private" { + return v4.IPAddress, nil + } + } + + return "", nil +} + +// PublicIPv6 returns the public IPv6 address for the Droplet. +func (d *Droplet) PublicIPv6() (string, error) { + if d.Networks == nil { + return "", errNoNetworks + } + + for _, v4 := range d.Networks.V6 { + if v4.Type == "public" { + return v4.IPAddress, nil + } + } + + return "", nil } // Kernel object @@ -96,6 +147,27 @@ type DropletCreateImage struct { Slug string } +// DropletCreateVolume identifies a volume to attach for the create request. It +// prefers Name over ID. +type DropletCreateVolume struct { + ID string + Name string +} + +// MarshalJSON returns an object with either the name or id of the volume. It +// returns the id if the name is empty.
+func (d DropletCreateVolume) MarshalJSON() ([]byte, error) { + if d.Name != "" { + return json.Marshal(struct { + Name string `json:"name"` + }{Name: d.Name}) + } + + return json.Marshal(struct { + ID string `json:"id"` + }{ID: d.ID}) +} + // MarshalJSON returns either the slug or id of the image. It returns the id // if the slug is empty. func (d DropletCreateImage) MarshalJSON() ([]byte, error) { @@ -133,9 +205,10 @@ type DropletCreateRequest struct { IPv6 bool `json:"ipv6"` PrivateNetworking bool `json:"private_networking"` UserData string `json:"user_data,omitempty"` + Volumes []DropletCreateVolume `json:"volumes,omitempty"` } - +// DropletMultiCreateRequest is a request to create multiple droplets. type DropletMultiCreateRequest struct { Names []string `json:"names"` Region string `json:"region"` @@ -186,14 +259,8 @@ func (n NetworkV6) String() string { return Stringify(n) } -// List all droplets -func (s *DropletsServiceOp) List(opt *ListOptions) ([]Droplet, *Response, error) { - path := dropletBasePath - path, err := addOptions(path, opt) - if err != nil { - return nil, nil, err - } - +// Performs a list request given a path +func (s *DropletsServiceOp) list(path string) ([]Droplet, *Response, error) { req, err := s.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err @@ -211,6 +278,28 @@ func (s *DropletsServiceOp) List(opt *ListOptions) ([]Droplet, *Response, error) return root.Droplets, resp, err } +// List all droplets +func (s *DropletsServiceOp) List(opt *ListOptions) ([]Droplet, *Response, error) { + path := dropletBasePath + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(path) +} + +// List all droplets by tag +func (s *DropletsServiceOp) ListByTag(tag string, opt *ListOptions) ([]Droplet, *Response, error) { + path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + return s.list(path) +} + // Get individual droplet func (s *DropletsServiceOp) Get(dropletID int) (*Droplet, *Response, error) { if dropletID < 1 { @@ -258,7 +347,7 @@ func (s *DropletsServiceOp) Create(createRequest *DropletCreateRequest) (*Drople return root.Droplet, resp, err } -// Create multiple droplet +// CreateMultiple creates multiple droplets. 
func (s *DropletsServiceOp) CreateMultiple(createRequest *DropletMultiCreateRequest) ([]Droplet, *Response, error) { if createRequest == nil { return nil, nil, NewArgError("createRequest", "cannot be nil") @@ -283,14 +372,8 @@ func (s *DropletsServiceOp) CreateMultiple(createRequest *DropletMultiCreateRequ return root.Droplets, resp, err } -// Delete droplet -func (s *DropletsServiceOp) Delete(dropletID int) (*Response, error) { - if dropletID < 1 { - return nil, NewArgError("dropletID", "cannot be less than 1") - } - - path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) - +// Performs a delete request given a path +func (s *DropletsServiceOp) delete(path string) (*Response, error) { req, err := s.client.NewRequest("DELETE", path, nil) if err != nil { return nil, err @@ -301,6 +384,28 @@ func (s *DropletsServiceOp) Delete(dropletID int) (*Response, error) { return resp, err } +// Delete droplet +func (s *DropletsServiceOp) Delete(dropletID int) (*Response, error) { + if dropletID < 1 { + return nil, NewArgError("dropletID", "cannot be less than 1") + } + + path := fmt.Sprintf("%s/%d", dropletBasePath, dropletID) + + return s.delete(path) +} + +// Delete droplets by tag +func (s *DropletsServiceOp) DeleteByTag(tag string) (*Response, error) { + if tag == "" { + return nil, NewArgError("tag", "cannot be empty") + } + + path := fmt.Sprintf("%s?tag_name=%s", dropletBasePath, tag) + + return s.delete(path) +} + // Kernels lists kernels available for a droplet. func (s *DropletsServiceOp) Kernels(dropletID int, opt *ListOptions) ([]Kernel, *Response, error) { if dropletID < 1 { diff --git a/vendor/github.com/digitalocean/godo/godo.go b/vendor/github.com/digitalocean/godo/godo.go index f57b3bca6..78fc8be27 100644 --- a/vendor/github.com/digitalocean/godo/godo.go +++ b/vendor/github.com/digitalocean/godo/godo.go @@ -22,9 +22,9 @@ const ( userAgent = "godo/" + libraryVersion mediaType = "application/json" - headerRateLimit = "X-RateLimit-Limit" - headerRateRemaining = "X-RateLimit-Remaining" - headerRateReset = "X-RateLimit-Reset" + headerRateLimit = "RateLimit-Limit" + headerRateRemaining = "RateLimit-Remaining" + headerRateReset = "RateLimit-Reset" ) // Client manages communication with DigitalOcean V2 API. @@ -55,6 +55,9 @@ type Client struct { Sizes SizesService FloatingIPs FloatingIPsService FloatingIPActions FloatingIPActionsService + Storage StorageService + StorageActions StorageActionsService + Tags TagsService // Optional function called after every successful request made to the DO APIs onRequestCompleted RequestCompletionCallback @@ -93,7 +96,10 @@ type ErrorResponse struct { Response *http.Response // Error message - Message string + Message string `json:"message"` + + // RequestID returned from the API, useful to contact support. + RequestID string `json:"request_id"` } // Rate contains the rate limit for the current client. @@ -156,10 +162,49 @@ func NewClient(httpClient *http.Client) *Client { c.Sizes = &SizesServiceOp{client: c} c.FloatingIPs = &FloatingIPsServiceOp{client: c} c.FloatingIPActions = &FloatingIPActionsServiceOp{client: c} + c.Storage = &StorageServiceOp{client: c} + c.StorageActions = &StorageActionsServiceOp{client: c} + c.Tags = &TagsServiceOp{client: c} return c } +// ClientOpt are options for New. +type ClientOpt func(*Client) error + +// New returns a new DIgitalOcean API client instance. 
+func New(httpClient *http.Client, opts ...ClientOpt) (*Client, error) { + c := NewClient(httpClient) + for _, opt := range opts { + if err := opt(c); err != nil { + return nil, err + } + } + + return c, nil +} + +// SetBaseURL is a client option for setting the base URL. +func SetBaseURL(bu string) ClientOpt { + return func(c *Client) error { + u, err := url.Parse(bu) + if err != nil { + return err + } + + c.BaseURL = u + return nil + } +} + +// SetUserAgent is a client option for setting the user agent. +func SetUserAgent(ua string) ClientOpt { + return func(c *Client) error { + c.UserAgent = fmt.Sprintf("%s+%s", ua, c.UserAgent) + return nil + } +} + // NewRequest creates an API request. A relative URL can be provided in urlStr, which will be resolved to the // BaseURL of the Client. Relative URLS should always be specified without a preceding slash. If specified, the // value pointed to by body is JSON encoded and included in as the request body. @@ -186,7 +231,7 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ req.Header.Add("Content-Type", mediaType) req.Header.Add("Accept", mediaType) - req.Header.Add("User-Agent", userAgent) + req.Header.Add("User-Agent", c.UserAgent) return req, nil } @@ -280,6 +325,10 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) { return response, err } func (r *ErrorResponse) Error() string { + if r.RequestID != "" { + return fmt.Sprintf("%v %v: %d (request %q) %v", + r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.RequestID, r.Message) + } return fmt.Sprintf("%v %v: %d %v", r.Response.Request.Method, r.Response.Request.URL, r.Response.StatusCode, r.Message) } diff --git a/vendor/github.com/digitalocean/godo/storage.go b/vendor/github.com/digitalocean/godo/storage.go new file mode 100644 index 000000000..5667ff7bd --- /dev/null +++ b/vendor/github.com/digitalocean/godo/storage.go @@ -0,0 +1,252 @@ +package godo + +import ( + "fmt" + "time" +) + +const ( + storageBasePath = "v2" + storageAllocPath = storageBasePath + "/volumes" + storageSnapPath = storageBasePath + "/snapshots" +) + +// StorageService is an interface for interfacing with the storage +// endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#storage +type StorageService interface { + ListVolumes(*ListOptions) ([]Volume, *Response, error) + GetVolume(string) (*Volume, *Response, error) + CreateVolume(*VolumeCreateRequest) (*Volume, *Response, error) + DeleteVolume(string) (*Response, error) +} + +// BetaStorageService is an interface for the storage services that are +// not yet stable. The interface is not exposed in the godo.Client and +// requires type-asserting the `StorageService` to make it available. +// +// Note that Beta features will change and compiling against those +// symbols (using type-assertion) is prone to breaking your build +// if you use our master. +type BetaStorageService interface { + StorageService + + ListSnapshots(volumeID string, opts *ListOptions) ([]Snapshot, *Response, error) + GetSnapshot(string) (*Snapshot, *Response, error) + CreateSnapshot(*SnapshotCreateRequest) (*Snapshot, *Response, error) + DeleteSnapshot(string) (*Response, error) +} + +// StorageServiceOp handles communication with the storage volumes related methods of the +// DigitalOcean API. +type StorageServiceOp struct { + client *Client +} + +var _ StorageService = &StorageServiceOp{} + +// Volume represents a Digital Ocean block store volume. 
+type Volume struct { + ID string `json:"id"` + Region *Region `json:"region"` + Name string `json:"name"` + SizeGigaBytes int64 `json:"size_gigabytes"` + Description string `json:"description"` + DropletIDs []int `json:"droplet_ids"` + CreatedAt time.Time `json:"created_at"` +} + +func (f Volume) String() string { + return Stringify(f) +} + +type storageVolumesRoot struct { + Volumes []Volume `json:"volumes"` + Links *Links `json:"links"` +} + +type storageVolumeRoot struct { + Volume *Volume `json:"volume"` + Links *Links `json:"links,omitempty"` +} + +// VolumeCreateRequest represents a request to create a block store +// volume. +type VolumeCreateRequest struct { + Region string `json:"region"` + Name string `json:"name"` + Description string `json:"description"` + SizeGigaBytes int64 `json:"size_gigabytes"` +} + +// ListVolumes lists all storage volumes. +func (svc *StorageServiceOp) ListVolumes(opt *ListOptions) ([]Volume, *Response, error) { + path, err := addOptions(storageAllocPath, opt) + if err != nil { + return nil, nil, err + } + + req, err := svc.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(storageVolumesRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Volumes, resp, nil +} + +// CreateVolume creates a storage volume. The name must be unique. +func (svc *StorageServiceOp) CreateVolume(createRequest *VolumeCreateRequest) (*Volume, *Response, error) { + path := storageAllocPath + + req, err := svc.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(storageVolumeRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + return root.Volume, resp, nil +} + +// GetVolume retrieves an individual storage volume. +func (svc *StorageServiceOp) GetVolume(id string) (*Volume, *Response, error) { + path := fmt.Sprintf("%s/%s", storageAllocPath, id) + + req, err := svc.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(storageVolumeRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Volume, resp, nil +} + +// DeleteVolume deletes a storage volume. +func (svc *StorageServiceOp) DeleteVolume(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", storageAllocPath, id) + + req, err := svc.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + return svc.client.Do(req, nil) +} + +// Snapshot represents a Digital Ocean block store snapshot. +type Snapshot struct { + ID string `json:"id"` + VolumeID string `json:"volume_id"` + Region *Region `json:"region"` + Name string `json:"name"` + SizeGigaBytes int64 `json:"size_gigabytes"` + Description string `json:"description"` + CreatedAt time.Time `json:"created_at"` +} + +type storageSnapsRoot struct { + Snapshots []Snapshot `json:"snapshots"` + Links *Links `json:"links"` +} + +type storageSnapRoot struct { + Snapshot *Snapshot `json:"snapshot"` + Links *Links `json:"links,omitempty"` +} + +// SnapshotCreateRequest represents a request to create a block store +// volume. +type SnapshotCreateRequest struct { + VolumeID string `json:"volume_id"` + Name string `json:"name"` + Description string `json:"description"` +} + +// ListSnapshots lists all snapshots related to a storage volume. 
+func (svc *StorageServiceOp) ListSnapshots(volumeID string, opt *ListOptions) ([]Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, volumeID) + path, err := addOptions(path, opt) + if err != nil { + return nil, nil, err + } + + req, err := svc.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(storageSnapsRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Snapshots, resp, nil +} + +// CreateSnapshot creates a snapshot of a storage volume. +func (svc *StorageServiceOp) CreateSnapshot(createRequest *SnapshotCreateRequest) (*Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s/snapshots", storageAllocPath, createRequest.VolumeID) + + req, err := svc.client.NewRequest("POST", path, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(storageSnapRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + return root.Snapshot, resp, nil +} + +// GetSnapshot retrieves an individual snapshot. +func (svc *StorageServiceOp) GetSnapshot(id string) (*Snapshot, *Response, error) { + path := fmt.Sprintf("%s/%s", storageSnapPath, id) + + req, err := svc.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(storageSnapRoot) + resp, err := svc.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Snapshot, resp, nil +} + +// DeleteSnapshot deletes a snapshot. +func (svc *StorageServiceOp) DeleteSnapshot(id string) (*Response, error) { + path := fmt.Sprintf("%s/%s", storageSnapPath, id) + + req, err := svc.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + return svc.client.Do(req, nil) +} diff --git a/vendor/github.com/digitalocean/godo/storage_actions.go b/vendor/github.com/digitalocean/godo/storage_actions.go new file mode 100644 index 000000000..20dc4aa59 --- /dev/null +++ b/vendor/github.com/digitalocean/godo/storage_actions.go @@ -0,0 +1,61 @@ +package godo + +import "fmt" + +// StorageActionsService is an interface for interfacing with the +// storage actions endpoints of the Digital Ocean API. +// See: https://developers.digitalocean.com/documentation/v2#storage-actions +type StorageActionsService interface { + Attach(volumeID string, dropletID int) (*Action, *Response, error) + Detach(volumeID string) (*Action, *Response, error) +} + +// StorageActionsServiceOp handles communication with the storage actions +// related methods of the DigitalOcean API. +type StorageActionsServiceOp struct { + client *Client +} + +// StorageAttachment represents the attachment of a block storage +// volume to a specific droplet under the device name. +type StorageAttachment struct { + DropletID int `json:"droplet_id"` +} + +// Attach a storage volume to a droplet. +func (s *StorageActionsServiceOp) Attach(volumeID string, dropletID int) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "attach", + "droplet_id": dropletID, + } + return s.doAction(volumeID, request) +} + +// Detach a storage volume from a droplet.
+func (s *StorageActionsServiceOp) Detach(volumeID string) (*Action, *Response, error) { + request := &ActionRequest{ + "type": "detach", + } + return s.doAction(volumeID, request) +} + +func (s *StorageActionsServiceOp) doAction(volumeID string, request *ActionRequest) (*Action, *Response, error) { + path := storageAllocationActionPath(volumeID) + + req, err := s.client.NewRequest("POST", path, request) + if err != nil { + return nil, nil, err + } + + root := new(actionRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return &root.Event, resp, err +} + +func storageAllocationActionPath(volumeID string) string { + return fmt.Sprintf("%s/%s/actions", storageAllocPath, volumeID) +} diff --git a/vendor/github.com/digitalocean/godo/tags.go b/vendor/github.com/digitalocean/godo/tags.go new file mode 100644 index 000000000..dd6c638ec --- /dev/null +++ b/vendor/github.com/digitalocean/godo/tags.go @@ -0,0 +1,226 @@ +package godo + +import "fmt" + +const tagsBasePath = "v2/tags" + +// TagsService is an interface for interfacing with the tags +// endpoints of the DigitalOcean API +// See: https://developers.digitalocean.com/documentation/v2#tags +type TagsService interface { + List(*ListOptions) ([]Tag, *Response, error) + Get(string) (*Tag, *Response, error) + Create(*TagCreateRequest) (*Tag, *Response, error) + Update(string, *TagUpdateRequest) (*Response, error) + Delete(string) (*Response, error) + + TagResources(string, *TagResourcesRequest) (*Response, error) + UntagResources(string, *UntagResourcesRequest) (*Response, error) +} + +// TagsServiceOp handles communication with tag related methods of the +// DigitalOcean API. +type TagsServiceOp struct { + client *Client +} + +var _ TagsService = &TagsServiceOp{} + +// ResourceType represents a class of resource, currently only droplets are supported +type ResourceType string + +const ( + DropletResourceType ResourceType = "droplet" ) + +// Resource represents a single resource for associating/disassociating with tags +type Resource struct { + ID string `json:"resource_id,omitempty"` + Type ResourceType `json:"resource_type,omitempty"` +} + +// TaggedResources represents the set of resources a tag is attached to +type TaggedResources struct { + Droplets *TaggedDropletsResources `json:"droplets,omitempty"` +} + +// TaggedDropletsResources represents the droplet resources a tag is attached to +type TaggedDropletsResources struct { + Count int `json:"count,float64,omitempty"` + LastTagged *Droplet `json:"last_tagged,omitempty"` +} + +// Tag represents a DigitalOcean tag +type Tag struct { + Name string `json:"name,omitempty"` + Resources *TaggedResources `json:"resources,omitempty"` +} + +type TagCreateRequest struct { + Name string `json:"name"` +} + +type TagUpdateRequest struct { + Name string `json:"name"` +} + +type TagResourcesRequest struct { + Resources []Resource `json:"resources"` +} + +type UntagResourcesRequest struct { + Resources []Resource `json:"resources"` +} + +type tagsRoot struct { + Tags []Tag `json:"tags"` + Links *Links `json:"links"` +} + +type tagRoot struct { + Tag *Tag `json:"tag"` +} + +// List all tags +func (s *TagsServiceOp) List(opt *ListOptions) ([]Tag, *Response, error) { + path := tagsBasePath + path, err := addOptions(path, opt) + + if err != nil { + return nil, nil, err + } + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tagsRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp,
err + } + if l := root.Links; l != nil { + resp.Links = l + } + + return root.Tags, resp, err +} + +// Get a single tag +func (s *TagsServiceOp) Get(name string) (*Tag, *Response, error) { + path := fmt.Sprintf("%s/%s", tagsBasePath, name) + + req, err := s.client.NewRequest("GET", path, nil) + if err != nil { + return nil, nil, err + } + + root := new(tagRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Tag, resp, err +} + +// Create a new tag +func (s *TagsServiceOp) Create(createRequest *TagCreateRequest) (*Tag, *Response, error) { + if createRequest == nil { + return nil, nil, NewArgError("createRequest", "cannot be nil") + } + + req, err := s.client.NewRequest("POST", tagsBasePath, createRequest) + if err != nil { + return nil, nil, err + } + + root := new(tagRoot) + resp, err := s.client.Do(req, root) + if err != nil { + return nil, resp, err + } + + return root.Tag, resp, err +} + +// Update an exsting tag +func (s *TagsServiceOp) Update(name string, updateRequest *TagUpdateRequest) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + if updateRequest == nil { + return nil, NewArgError("updateRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s", tagsBasePath, name) + req, err := s.client.NewRequest("PUT", path, updateRequest) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Delete an existing tag +func (s *TagsServiceOp) Delete(name string) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + path := fmt.Sprintf("%s/%s", tagsBasePath, name) + req, err := s.client.NewRequest("DELETE", path, nil) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Associate resources with a tag +func (s *TagsServiceOp) TagResources(name string, tagRequest *TagResourcesRequest) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + if tagRequest == nil { + return nil, NewArgError("tagRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) + req, err := s.client.NewRequest("POST", path, tagRequest) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} + +// Dissociate resources with a tag +func (s *TagsServiceOp) UntagResources(name string, untagRequest *UntagResourcesRequest) (*Response, error) { + if name == "" { + return nil, NewArgError("name", "cannot be empty") + } + + if untagRequest == nil { + return nil, NewArgError("tagRequest", "cannot be nil") + } + + path := fmt.Sprintf("%s/%s/resources", tagsBasePath, name) + req, err := s.client.NewRequest("DELETE", path, untagRequest) + if err != nil { + return nil, err + } + + resp, err := s.client.Do(req, nil) + + return resp, err +} diff --git a/vendor/vendor.json b/vendor/vendor.json index d9635eb2a..205d5b5a1 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -625,9 +625,11 @@ "revisionTime": "2016-06-17T17:01:58Z" }, { + "checksumSHA1": "mbMr6wMbQnMrfIwUtej8QcGsx0A=", "comment": "v0.9.0-20-gf75d769", "path": "github.com/digitalocean/godo", - "revision": "f75d769b07edce8a73682dcf325b4404f366ab3d" + "revision": "e03ac28c3d9b216f7e9ed16bc6aa39e344d56491", + "revisionTime": "2016-06-27T19:55:12Z" }, { "path": "github.com/dylanmei/iso8601", diff --git a/website/source/docs/providers/do/r/droplet.html.markdown 
b/website/source/docs/providers/do/r/droplet.html.markdown index 053ab74a1..f9aab4f68 100644 --- a/website/source/docs/providers/do/r/droplet.html.markdown +++ b/website/source/docs/providers/do/r/droplet.html.markdown @@ -44,6 +44,8 @@ The following arguments are supported: the format `[12345, 123456]`. To retrieve this info, use a tool such as `curl` with the [DigitalOcean API](https://developers.digitalocean.com/#keys), to retrieve them. +* `tags` - (Optional) A list of the tags to label this droplet. A tag resource + must exist before it can be associated with a droplet. * `user_data` (Optional) - A string of the desired User Data for the Droplet. User Data is currently only available in regions with metadata listed in their features. @@ -65,4 +67,4 @@ The following attributes are exported: * `private_networking` - Is private networking enabled * `size` - The instance size * `status` - The status of the droplet - +* `tags` - The tags associated with the droplet diff --git a/website/source/docs/providers/do/r/tag.html.markdown b/website/source/docs/providers/do/r/tag.html.markdown new file mode 100644 index 000000000..562e400c6 --- /dev/null +++ b/website/source/docs/providers/do/r/tag.html.markdown @@ -0,0 +1,36 @@ +--- +layout: "digitalocean" +page_title: "DigitalOcean: digitalocean_tag" +sidebar_current: "docs-do-resource-tag" +description: |- + Provides a DigitalOcean Tag resource. +--- + +# digitalocean\_tag + +Provides a DigitalOcean Tag resource. A Tag is a label that can be applied to a +droplet resource in order to better organize it and to facilitate lookups and +actions on it. Tags created with this resource can be referenced in your droplet +configuration via their ID or name. + +## Example Usage + +``` +# Create a new tag +resource "digitalocean_tag" "default" { + name = "foobar" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the tag + +## Attributes Reference + +The following attributes are exported: + +* `id` - The name of the tag +* `name` - The name of the tag From e4ff7649d85015a740cf5703ba349daaea74e6fe Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 12:09:49 +0100 Subject: [PATCH 0202/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa8a7660f..d3418109a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ FEATURES: * **New Resource:** `github_repository_collaborator` [GH-6861] * **New Resource:** `azurerm_virtual_machine_scale_set` [GH-6711] * **New Resource:** `datadog_timeboard` [GH-6900] + * **New Resource:** `digitalocean_tag` [GH-7500] * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] * core: The `lookup` interpolation function can now have a default fall-back value specified [GH-6884] * core: The `terraform plan` command no longer persists state. [GH-6811] From ef707a89a809d079f0e975ed846ecdb1d5e35179 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 11 Jul 2016 12:30:22 +0100 Subject: [PATCH 0203/1238] provider/azurerm: Support Import of `azurerm_resource_group` ``` % make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMResourceGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./...
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMResourceGroup_ -timeout 120m === RUN TestAccAzureRMResourceGroup_importBasic --- PASS: TestAccAzureRMResourceGroup_importBasic (92.84s) === RUN TestAccAzureRMResourceGroup_basic --- PASS: TestAccAzureRMResourceGroup_basic (91.56s) === RUN TestAccAzureRMResourceGroup_withTags --- PASS: TestAccAzureRMResourceGroup_withTags (110.42s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 294.832s ``` --- .../azurerm/import_arm_resource_group_test.go | 35 +++++++++++++++++++ .../azurerm/resource_arm_resource_group.go | 3 ++ 2 files changed, 38 insertions(+) create mode 100644 builtin/providers/azurerm/import_arm_resource_group_test.go diff --git a/builtin/providers/azurerm/import_arm_resource_group_test.go b/builtin/providers/azurerm/import_arm_resource_group_test.go new file mode 100644 index 000000000..08815a911 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_resource_group_test.go @@ -0,0 +1,35 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMResourceGroup_importBasic(t *testing.T) { + resourceName := "azurerm_resource_group.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMResourceGroup_basic, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMResourceGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + //ImportStateVerifyIgnore: []string{"resource_group_name"}, + //this isn't returned from the API! + }, + }, + }) +} diff --git a/builtin/providers/azurerm/resource_arm_resource_group.go b/builtin/providers/azurerm/resource_arm_resource_group.go index 9770bf5f5..4f4382783 100644 --- a/builtin/providers/azurerm/resource_arm_resource_group.go +++ b/builtin/providers/azurerm/resource_arm_resource_group.go @@ -17,6 +17,9 @@ func resourceArmResourceGroup() *schema.Resource { Update: resourceArmResourceGroupUpdate, Exists: resourceArmResourceGroupExists, Delete: resourceArmResourceGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ From f989009448150526caa2cf5c92d8485d51c6f33b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 13:33:39 +0100 Subject: [PATCH 0204/1238] Update import_arm_resource_group_test.go --- builtin/providers/azurerm/import_arm_resource_group_test.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builtin/providers/azurerm/import_arm_resource_group_test.go b/builtin/providers/azurerm/import_arm_resource_group_test.go index 08815a911..aadf4817e 100644 --- a/builtin/providers/azurerm/import_arm_resource_group_test.go +++ b/builtin/providers/azurerm/import_arm_resource_group_test.go @@ -27,8 +27,6 @@ func TestAccAzureRMResourceGroup_importBasic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - //ImportStateVerifyIgnore: []string{"resource_group_name"}, - //this isn't returned from the API! 
}, }, }) From c9476ea65b0b989a42506f106a507c1fc292e8b9 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 11 Jul 2016 14:32:03 +0100 Subject: [PATCH 0205/1238] provider/azurerm: Support Import for `azurerm_public_ip` Had to make some changes to this resource. Params were not being set in the Read func - also added a statefunc to the IPAddressAllocation as that was coming back in a different case to how we were sending it. We need to treat that property as case-insensitive ``` % make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMPublicIpStatic_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMPublicIpStatic_ -timeout 120m === RUN TestAccAzureRMPublicIpStatic_importBasic --- PASS: TestAccAzureRMPublicIpStatic_importBasic (128.06s) === RUN TestAccAzureRMPublicIpStatic_basic --- PASS: TestAccAzureRMPublicIpStatic_basic (126.25s) === RUN TestAccAzureRMPublicIpStatic_withTags --- PASS: TestAccAzureRMPublicIpStatic_withTags (145.99s) === RUN TestAccAzureRMPublicIpStatic_update --- PASS: TestAccAzureRMPublicIpStatic_update (192.32s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 592.648s ``` --- .../azurerm/import_arm_public_ip_test.go | 34 +++++++++++++++++++ .../azurerm/resource_arm_public_ip.go | 10 ++++++ 2 files changed, 44 insertions(+) create mode 100644 builtin/providers/azurerm/import_arm_public_ip_test.go diff --git a/builtin/providers/azurerm/import_arm_public_ip_test.go b/builtin/providers/azurerm/import_arm_public_ip_test.go new file mode 100644 index 000000000..f911aa309 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_public_ip_test.go @@ -0,0 +1,34 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMPublicIpStatic_importBasic(t *testing.T) { + resourceName := "azurerm_public_ip.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVPublicIpStatic_basic, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMPublicIpDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_group_name"}, + }, + }, + }) +} diff --git a/builtin/providers/azurerm/resource_arm_public_ip.go b/builtin/providers/azurerm/resource_arm_public_ip.go index bde231e3d..2f94add62 100644 --- a/builtin/providers/azurerm/resource_arm_public_ip.go +++ b/builtin/providers/azurerm/resource_arm_public_ip.go @@ -17,6 +17,9 @@ func resourceArmPublicIp() *schema.Resource { Read: resourceArmPublicIpRead, Update: resourceArmPublicIpCreate, Delete: resourceArmPublicIpDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -42,6 +45,9 @@ func resourceArmPublicIp() *schema.Resource { Type: schema.TypeString, Required: true, ValidateFunc: validatePublicIpAllocation, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, }, "idle_timeout_in_minutes": { @@ -167,6 +173,10 @@ func resourceArmPublicIpRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error making Read request on Azure public ip %s: %s", name, 
err) } + d.Set("location", resp.Location) + d.Set("name", resp.Name) + d.Set("public_ip_address_allocation", strings.ToLower(string(resp.Properties.PublicIPAllocationMethod))) + if resp.Properties.DNSSettings != nil && resp.Properties.DNSSettings.Fqdn != nil && *resp.Properties.DNSSettings.Fqdn != "" { d.Set("fqdn", resp.Properties.DNSSettings.Fqdn) } From 784a1060e994d072c5b41e6bef6c1847813813ad Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 15:48:34 +0100 Subject: [PATCH 0206/1238] Revert "Revert "Adding disk keep_on_remove support on delete case"" --- .../resource_vsphere_virtual_machine.go | 29 +++++- .../resource_vsphere_virtual_machine_test.go | 96 +++++++++++++++++++ 2 files changed, 123 insertions(+), 2 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 30bb8924d..4998d63a4 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -509,8 +509,8 @@ func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{ virtualDisk := devices.FindByKey(int32(disk["key"].(int))) keep := false - if v, ok := d.GetOk("keep_on_remove"); ok { - keep = v.(bool) + if v, ok := disk["keep_on_remove"].(bool); ok { + keep = v } err = vm.RemoveDevice(context.TODO(), keep, virtualDisk) @@ -1093,6 +1093,11 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ if err != nil { return err } + devices, err := vm.Device(context.TODO()) + if err != nil { + log.Printf("[DEBUG] resourceVSphereVirtualMachineDelete - Failed to get device list: %v", err) + return err + } log.Printf("[INFO] Deleting virtual machine: %s", d.Id()) state, err := vm.PowerState(context.TODO()) @@ -1112,6 +1117,26 @@ func resourceVSphereVirtualMachineDelete(d *schema.ResourceData, meta interface{ } } + // Safely eject any disks the user marked as keep_on_remove + if vL, ok := d.GetOk("disk"); ok { + if diskSet, ok := vL.(*schema.Set); ok { + + for _, value := range diskSet.List() { + disk := value.(map[string]interface{}) + + if v, ok := disk["keep_on_remove"].(bool); ok && v == true { + log.Printf("[DEBUG] not destroying %v", disk["name"]) + virtualDisk := devices.FindByKey(int32(disk["key"].(int))) + err = vm.RemoveDevice(context.TODO(), true, virtualDisk) + if err != nil { + log.Printf("[ERROR] Update Remove Disk - Error removing disk: %v", err) + return err + } + } + } + } + } + task, err := vm.Destroy(context.TODO()) if err != nil { return err diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index a69c80567..4ebeab093 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -1154,3 +1154,99 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou return nil } } + +const testAccCheckVSphereVirtualMachineConfig_keepOnRemove = ` +resource "vsphere_virtual_machine" "keep_disk" { + name = "terraform-test" +` + testAccTemplateBasicBody + ` + disk { + size = 1 + iops = 500 + controller_type = "scsi" + name = "one" + keep_on_remove = true + } +} +` + +func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { + var vm virtualMachine + basic_vars := setupTemplateBasicBodyVars() + config := 
basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_keepOnRemove) + var datastore string + if v := os.Getenv("VSPHERE_DATASTORE"); v != "" { + datastore = v + } + var datacenter string + if v := os.Getenv("VSPHERE_DATACENTER"); v != "" { + datacenter = v + } + + vmName := "vsphere_virtual_machine.keep_disk" + test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "2"}.testCheckFuncBasic() + + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_keepOnRemove) + log.Printf("[DEBUG] template config= %s", config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + ), + }, + resource.TestStep{ + Config: " ", + Check: checkForDisk(datacenter, datastore, "terraform-test", "one.vmdk"), + }, + }, + }) +} + +func checkForDisk(datacenter string, datastore string, vmName string, path string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*govmomi.Client) + finder := find.NewFinder(client.Client, true) + + dc, err := getDatacenter(client, datacenter) + if err != nil { + return err + } + finder.SetDatacenter(dc) + + ds, err := finder.Datastore(context.TODO(), datastore) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't find Datastore '%v': %v", datastore, err) + return err + } + + diskPath := vmName + "/" + path + + _, err = ds.Stat(context.TODO(), diskPath) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't stat file '%v': %v", diskPath, err) + return err + } + + // Cleanup + fileManager := object.NewFileManager(client.Client) + task, err := fileManager.DeleteDatastoreFile(context.TODO(), ds.Path(vmName), dc) + if err != nil { + log.Printf("[ERROR] checkForDisk - Couldn't delete vm folder '%v': %v", vmName, err) + return err + } + + _, err = task.WaitForResult(context.TODO(), nil) + if err != nil { + log.Printf("[ERROR] checForDisk - Failed while deleting vm folder '%v': %v", vmName, err) + return err + } + + return nil + } +} From fc838be69e411e47d732efb3e52dba8bac91c64e Mon Sep 17 00:00:00 2001 From: JB Arsenault Date: Mon, 11 Jul 2016 11:03:02 -0400 Subject: [PATCH 0207/1238] Add `destroy_grace_seconds` option to stop container before delete (#7513) --- builtin/providers/docker/resource_docker_container.go | 5 +++++ .../providers/docker/resource_docker_container_funcs.go | 8 ++++++++ .../providers/docker/resource_docker_container_test.go | 1 + .../docs/providers/docker/r/container.html.markdown | 1 + 4 files changed, 15 insertions(+) diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go index 604b116ea..4e61bc2a2 100644 --- a/builtin/providers/docker/resource_docker_container.go +++ b/builtin/providers/docker/resource_docker_container.go @@ -285,6 +285,11 @@ func resourceDockerContainer() *schema.Resource { ForceNew: true, }, + "destroy_grace_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "labels": &schema.Schema{ Type: schema.TypeMap, Optional: true, diff --git a/builtin/providers/docker/resource_docker_container_funcs.go 
b/builtin/providers/docker/resource_docker_container_funcs.go index a2aa1479f..9668fd0a7 100644 --- a/builtin/providers/docker/resource_docker_container_funcs.go +++ b/builtin/providers/docker/resource_docker_container_funcs.go @@ -265,6 +265,14 @@ func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) err func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error { client := meta.(*dc.Client) + // Stop the container before removing if destroy_grace_seconds is defined + if d.Get("destroy_grace_seconds").(int) > 0 { + var timeout = uint(d.Get("destroy_grace_seconds").(int)) + if err := client.StopContainer(d.Id(), timeout); err != nil { + return fmt.Errorf("Error stopping container %s: %s", d.Id(), err) + } + } + removeOpts := dc.RemoveContainerOptions{ ID: d.Id(), RemoveVolumes: true, diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go index a3d7e9254..1c4da8cdd 100644 --- a/builtin/providers/docker/resource_docker_container_test.go +++ b/builtin/providers/docker/resource_docker_container_test.go @@ -255,6 +255,7 @@ resource "docker_container" "foo" { entrypoint = ["/bin/bash", "-c", "ping localhost"] user = "root:root" restart = "on-failure" + destroy_grace_seconds = 10 max_retry_count = 5 memory = 512 memory_swap = 2048 diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown index cd4586b11..c4beac8cc 100644 --- a/website/source/docs/providers/docker/r/container.html.markdown +++ b/website/source/docs/providers/docker/r/container.html.markdown @@ -79,6 +79,7 @@ The following arguments are supported: * `network_mode` - (Optional, string) Network mode of the container. * `networks` - (Optional, set of strings) Id of the networks in which the container is. +* `destroy_grace_seconds` - (Optional, int) If defined will attempt to stop the container before destroying. Container will be destroyed after `n` seconds or on successful stop. 
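+
+<!-- Illustrative sketch only, not part of the original commit: one way the new `destroy_grace_seconds` argument might be used. The `docker_image` reference and the 30-second value are assumptions for the example. -->
+
+```
+resource "docker_container" "web" {
+  name  = "web"
+  image = "${docker_image.nginx.latest}"
+
+  # Try a graceful stop first; the container is removed after 30 seconds
+  # or as soon as it stops, whichever comes first.
+  destroy_grace_seconds = 30
+}
+```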
### Ports From cfb42e07ed232cd8eb0233f54e7bc2285905c6c7 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 16:05:57 +0100 Subject: [PATCH 0208/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d3418109a..0c3a12b56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ IMPROVEMENTS: * provider/cloudstack: Improve ACL swapping [GH-7315] * provider/datadog: Add support for 'require full window' and 'locked' [GH-6738] * provider/docker: Docker Container DNS Setting Enhancements [GH-7392] + * provider/docker: Add `destroy_grace_seconds` option to stop container before delete [GH-7513] * provider/fastly: Add support for Cache Settings [GH-6781] * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources [GH-6622] * provider/fastly: Add support for custom VCL configuration [GH-6662] From c28b1002332140e4fbf08aa970aef409f71af170 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 11 Jul 2016 16:15:28 +0100 Subject: [PATCH 0209/1238] provider/vsphere: Fix the `vsphere_virtual_machine` tests The PR that was merged to add `keep_on_destroy` was showing a green build so was merged but that build happened before another merge adding another parameter to the tests FYI @dkalleg - fixes #7169 --- .../vsphere/resource_vsphere_virtual_machine_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index 4ebeab093..3c063d006 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -1183,7 +1183,7 @@ func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { } vmName := "vsphere_virtual_machine.keep_disk" - test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "2"}.testCheckFuncBasic() log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_keepOnRemove) @@ -1197,7 +1197,7 @@ func TestAccVSphereVirtualMachine_keepOnRemove(t *testing.T) { resource.TestStep{ Config: config, Check: resource.ComposeTestCheckFunc( - test_exists, test_name, test_cpu, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, ), }, resource.TestStep{ From f48ddfb1424e5de029416977d0bd0201d1704e4a Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 11 Jul 2016 12:59:31 -0500 Subject: [PATCH 0210/1238] vendor: Update to lastest hashicorp/hcl Catches https://github.com/hashicorp/hcl/pull/137 Fixes #7142 --- vendor/github.com/hashicorp/hcl/README.md | 17 +++++-- .../hashicorp/hcl/hcl/parser/parser.go | 34 +++++++++++-- .../hashicorp/hcl/hcl/scanner/scanner.go | 15 +++++- vendor/vendor.json | 50 +++++++++---------- 4 files changed, 81 insertions(+), 35 deletions(-) diff --git a/vendor/github.com/hashicorp/hcl/README.md b/vendor/github.com/hashicorp/hcl/README.md index 3d5b8bd92..e292d5999 100644 --- a/vendor/github.com/hashicorp/hcl/README.md +++ b/vendor/github.com/hashicorp/hcl/README.md @@ -81,9 +81,20 @@ FOO * Boolean values: `true`, `false` * Arrays can be made by wrapping it in `[]`. 
Example: - `["foo", "bar", 42]`. Arrays can contain primitives - and other arrays, but cannot contain objects. Objects must - use the block syntax shown below. + `["foo", "bar", 42]`. Arrays can contain primitives, + other arrays, and objects. As an alternative, lists + of objects can be created with repeated blocks, using + this structure: + + ```hcl + service { + key = "value" + } + + service { + key = "value" + } + ``` Objects and nested objects are created using the structure shown below: diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go index 37a72acbc..f46ed4cc0 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go +++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go @@ -79,6 +79,13 @@ func (p *Parser) objectList() (*ast.ObjectList, error) { } node.Add(n) + + // object lists can be optionally comma-delimited e.g. when a list of maps + // is being expressed, so a comma is allowed here - it's simply consumed + tok := p.scan() + if tok.Type != token.COMMA { + p.unscan() + } } return node, nil } @@ -311,15 +318,20 @@ func (p *Parser) listType() (*ast.ListType, error) { needComma := false for { tok := p.scan() - switch tok.Type { - case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: - if needComma { + if needComma { + switch tok.Type { + case token.COMMA, token.RBRACK: + default: return nil, &PosError{ Pos: tok.Pos, - Err: fmt.Errorf("unexpected token: %s. Expecting %s", tok.Type, token.COMMA), + Err: fmt.Errorf( + "error parsing list, expected comma or list end, got: %s", + tok.Type), } } - + } + switch tok.Type { + case token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC: node, err := p.literalType() if err != nil { return nil, err @@ -343,6 +355,18 @@ func (p *Parser) listType() (*ast.ListType, error) { needComma = false continue + case token.LBRACE: + // Looks like a nested object, so parse it out + node, err := p.objectType() + if err != nil { + return nil, &PosError{ + Pos: tok.Pos, + Err: fmt.Errorf( + "error while trying to parse object within list: %s", err), + } + } + l.Add(node) + needComma = true case token.BOOL: // TODO(arslan) should we support? not supported by HCL yet case token.LBRACK: diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index a3f34a7b5..174119a8d 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -525,16 +525,27 @@ func (s *Scanner) scanEscape() rune { // scanDigits scans a rune with the given base for n times. For example an // octal notation \184 would yield in scanDigits(ch, 8, 3) func (s *Scanner) scanDigits(ch rune, base, n int) rune { + start := n for n > 0 && digitVal(ch) < base { ch = s.next() + if ch == eof { + // If we see an EOF, we halt any more scanning of digits + // immediately. 
+ break + } + n-- } if n > 0 { s.err("illegal char escape") } - // we scanned all digits, put the last non digit char back - s.unread() + if n != start { + // we scanned all digits, put the last non digit char back, + // only if we read anything at all + s.unread() + } + return ch } diff --git a/vendor/vendor.json b/vendor/vendor.json index 205d5b5a1..25225b8be 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -826,70 +826,70 @@ "revision": "7e3c02b30806fa5779d3bdfc152ce4c6f40e7b38" }, { - "checksumSHA1": "SJIgBfV02h1fsqCAe5DHj/JbHoM=", + "checksumSHA1": "ydHBPi04mEh+Tir+2JkpSIMckcw=", "path": "github.com/hashicorp/hcl", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "IxyvRpCFeoJBGl2obLKJV7RCGjg=", "path": "github.com/hashicorp/hcl/hcl/ast", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "5HVecyfmcTm6OTffEi6LGayQf5M=", "path": "github.com/hashicorp/hcl/hcl/fmtcmd", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { - "checksumSHA1": "cO89nXP9rKQCcm0zKGbtBCWK2ok=", + "checksumSHA1": "l2oQxBsZRwn6eZjf+whXr8c9+8c=", "path": "github.com/hashicorp/hcl/hcl/parser", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "CSmwxPOTz7GSpnWPF9aGkbVeR64=", "path": "github.com/hashicorp/hcl/hcl/printer", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { - "checksumSHA1": "WZM0q7Sya8PcGj607x1npgcEPa4=", + "checksumSHA1": "FHZ1IXjWHUyuMjy/wQChE4pSoPg=", "path": "github.com/hashicorp/hcl/hcl/scanner", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "riN5acfVDm4j6LhWXauqiWH5n84=", "path": "github.com/hashicorp/hcl/hcl/strconv", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=", "path": "github.com/hashicorp/hcl/hcl/token", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "jQ45CCc1ed/nlV7bbSnx6z72q1M=", "path": "github.com/hashicorp/hcl/json/parser", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "S1e0F9ZKSnqgOLfjDTYazRL28tA=", "path": "github.com/hashicorp/hcl/json/scanner", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": 
"364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=", "path": "github.com/hashicorp/hcl/json/token", - "revision": "61f5143284c041681f76a5b63efcb232aaa94737", - "revisionTime": "2016-06-24T12:12:30Z" + "revision": "364df430845abef160a0bfb3a59979f746bf4956", + "revisionTime": "2016-07-08T14:13:38Z" }, { "checksumSHA1": "vWW3HXm7OTOMISuZPcCSJODRYkU=", From 743be7914dbf6fa2c149315930d9ff58f1790c4c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 19:06:44 +0100 Subject: [PATCH 0211/1238] provider/aws: Refresh CloudWatch Group from state on 404 (#7576) Fixes #7543 where creating a CloudWatch Group, then deleting it from the console will cause no action on refresh / plan ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSCloudWatchLogGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSCloudWatchLogGroup_ -timeout 120m === RUN TestAccAWSCloudWatchLogGroup_importBasic --- PASS: TestAccAWSCloudWatchLogGroup_importBasic (18.10s) === RUN TestAccAWSCloudWatchLogGroup_basic --- PASS: TestAccAWSCloudWatchLogGroup_basic (17.34s) === RUN TestAccAWSCloudWatchLogGroup_retentionPolicy --- PASS: TestAccAWSCloudWatchLogGroup_retentionPolicy (49.81s) === RUN TestAccAWSCloudWatchLogGroup_multiple --- PASS: TestAccAWSCloudWatchLogGroup_multiple (23.74s) === RUN TestAccAWSCloudWatchLogGroup_disappears --- PASS: TestAccAWSCloudWatchLogGroup_disappears (15.78s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 124.789s ``` --- .../aws/resource_aws_cloudwatch_log_group.go | 16 ++++-- .../resource_aws_cloudwatch_log_group_test.go | 50 +++++++++++++++++-- 2 files changed, 56 insertions(+), 10 deletions(-) diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go index a135b4017..245c89043 100644 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_group.go +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group.go @@ -63,11 +63,17 @@ func resourceAwsCloudWatchLogGroupCreate(d *schema.ResourceData, meta interface{ func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).cloudwatchlogsconn log.Printf("[DEBUG] Reading CloudWatch Log Group: %q", d.Get("name").(string)) - lg, err := lookupCloudWatchLogGroup(conn, d.Id(), nil) + lg, exists, err := lookupCloudWatchLogGroup(conn, d.Id(), nil) if err != nil { return err } + if !exists { + log.Printf("[DEBUG] CloudWatch Group %q Not Found", d.Id()) + d.SetId("") + return nil + } + log.Printf("[DEBUG] Found Log Group: %#v", *lg) d.Set("arn", *lg.Arn) @@ -81,19 +87,19 @@ func resourceAwsCloudWatchLogGroupRead(d *schema.ResourceData, meta interface{}) } func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, - name string, nextToken *string) (*cloudwatchlogs.LogGroup, error) { + name string, nextToken *string) (*cloudwatchlogs.LogGroup, bool, error) { input := &cloudwatchlogs.DescribeLogGroupsInput{ LogGroupNamePrefix: aws.String(name), NextToken: nextToken, } resp, err := conn.DescribeLogGroups(input) if err != nil { - return nil, err + return nil, true, err } for _, lg := range resp.LogGroups { if *lg.LogGroupName == name { - return lg, nil + return lg, true, nil } } @@ -101,7 +107,7 @@ func lookupCloudWatchLogGroup(conn *cloudwatchlogs.CloudWatchLogs, return 
lookupCloudWatchLogGroup(conn, name, resp.NextToken) } - return nil, fmt.Errorf("CloudWatch Log Group %q not found", name) + return nil, false, nil } func resourceAwsCloudWatchLogGroupUpdate(d *schema.ResourceData, meta interface{}) error { diff --git a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go b/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go index da910c1a8..aba0db72c 100644 --- a/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go +++ b/builtin/providers/aws/resource_aws_cloudwatch_log_group_test.go @@ -77,6 +77,39 @@ func TestAccAWSCloudWatchLogGroup_multiple(t *testing.T) { }) } +func TestAccAWSCloudWatchLogGroup_disappears(t *testing.T) { + var lg cloudwatchlogs.LogGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudWatchLogGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudWatchLogGroupConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckCloudWatchLogGroupExists("aws_cloudwatch_log_group.foobar", &lg), + testAccCheckCloudWatchLogGroupDisappears(&lg), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckCloudWatchLogGroupDisappears(lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn + opts := &cloudwatchlogs.DeleteLogGroupInput{ + LogGroupName: lg.LogGroupName, + } + if _, err := conn.DeleteLogGroup(opts); err != nil { + return err + } + return nil + } +} + func testAccCheckCloudWatchLogGroupExists(n string, lg *cloudwatchlogs.LogGroup) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -85,10 +118,13 @@ func testAccCheckCloudWatchLogGroupExists(n string, lg *cloudwatchlogs.LogGroup) } conn := testAccProvider.Meta().(*AWSClient).cloudwatchlogsconn - logGroup, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) + logGroup, exists, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) if err != nil { return err } + if !exists { + return fmt.Errorf("Bad: LogGroup %q does not exist", rs.Primary.ID) + } *lg = *logGroup @@ -103,11 +139,15 @@ func testAccCheckAWSCloudWatchLogGroupDestroy(s *terraform.State) error { if rs.Type != "aws_cloudwatch_log_group" { continue } - - _, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) - if err == nil { - return fmt.Errorf("LogGroup Still Exists: %s", rs.Primary.ID) + _, exists, err := lookupCloudWatchLogGroup(conn, rs.Primary.ID, nil) + if err != nil { + return nil } + + if exists { + return fmt.Errorf("Bad: LogGroup still exists: %q", rs.Primary.ID) + } + } return nil From e39d311b1bf9c2ef959e27319892fe3265882326 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 11 Jul 2016 12:07:16 -0600 Subject: [PATCH 0212/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0c3a12b56..317f14c42 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -182,6 +182,7 @@ BUG FIXES: * provider/aws: Remove EFS File System from State when NotFound [GH-7437] * provider/aws: `aws_customer_gateway` refreshing from state on deleted state [GH-7482] * provider/aws: Retry finding `aws_route` after creating it [GH-7463] + * provider/aws: Refresh CloudWatch Group from state on 404 [GH-7576] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug 
causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 38e3d3ba8735466f2ee642386b3065522dba8ad5 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 21:58:25 +0100 Subject: [PATCH 0213/1238] provider/aws: Support Import `aws_cloudfront_origin_access_identity` (#7506) * provider/aws: Support Import `aws_cloudfront_origin_access_identity` ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSCloudFrontOriginAccessIdentity_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSCloudFrontOriginAccessIdentity_ -timeout 120m === RUN TestAccAWSCloudFrontOriginAccessIdentity_importBasic --- PASS: TestAccAWSCloudFrontOriginAccessIdentity_importBasic (17.07s) === RUN TestAccAWSCloudFrontOriginAccessIdentity_basic --- PASS: TestAccAWSCloudFrontOriginAccessIdentity_basic (15.34s) === RUN TestAccAWSCloudFrontOriginAccessIdentity_noComment --- PASS: TestAccAWSCloudFrontOriginAccessIdentity_noComment (16.29s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 48.717s ``` * Update import_aws_cloudfront_origin_access_identity_test.go --- ..._cloudfront_origin_access_identity_test.go | 28 +++++++++++++++++++ ...e_aws_cloudfront_origin_access_identity.go | 3 ++ 2 files changed, 31 insertions(+) create mode 100644 builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go diff --git a/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go b/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go new file mode 100644 index 000000000..dd45cc786 --- /dev/null +++ b/builtin/providers/aws/import_aws_cloudfront_origin_access_identity_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSCloudFrontOriginAccessIdentity_importBasic(t *testing.T) { + resourceName := "aws_cloudfront_origin_access_identity.origin_access_identity" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontOriginAccessIdentityDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudFrontOriginAccessIdentityConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_cloudfront_origin_access_identity.go b/builtin/providers/aws/resource_aws_cloudfront_origin_access_identity.go index cd6f8d73b..094203b4d 100644 --- a/builtin/providers/aws/resource_aws_cloudfront_origin_access_identity.go +++ b/builtin/providers/aws/resource_aws_cloudfront_origin_access_identity.go @@ -15,6 +15,9 @@ func resourceAwsCloudFrontOriginAccessIdentity() *schema.Resource { Read: resourceAwsCloudFrontOriginAccessIdentityRead, Update: resourceAwsCloudFrontOriginAccessIdentityUpdate, Delete: resourceAwsCloudFrontOriginAccessIdentityDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "comment": &schema.Schema{ From ea4483d8b608e44e88a6225c92bd7c6f10814ede Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 11 Jul 2016 21:59:14 +0100 Subject: [PATCH 0214/1238] provider/aws: Support Import `aws_rds_cluster_instance` (#7522) We 
were not setting all the values in the read Func. One other issue, we were setting the *wrong* db_parameter_name value to state ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRDSClusterInstance_import' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRDSClusterInstance_import -timeout 120m === RUN TestAccAWSRDSClusterInstance_importBasic --- PASS: TestAccAWSRDSClusterInstance_importBasic (1201.80s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1201.814s ``` --- .../import_aws_rds_cluster_instance_test.go | 29 +++++++++++++++++++ .../aws/resource_aws_rds_cluster_instance.go | 8 ++++- 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/aws/import_aws_rds_cluster_instance_test.go diff --git a/builtin/providers/aws/import_aws_rds_cluster_instance_test.go b/builtin/providers/aws/import_aws_rds_cluster_instance_test.go new file mode 100644 index 000000000..949acead7 --- /dev/null +++ b/builtin/providers/aws/import_aws_rds_cluster_instance_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSRDSClusterInstance_importBasic(t *testing.T) { + resourceName := "aws_rds_cluster_instance.cluster_instances" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterInstanceConfig(acctest.RandInt()), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 09580cc89..5289e4782 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -17,6 +17,9 @@ func resourceAwsRDSClusterInstance() *schema.Resource { Read: resourceAwsRDSClusterInstanceRead, Update: resourceAwsRDSClusterInstanceUpdate, Delete: resourceAwsRDSClusterInstanceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "identifier": &schema.Schema{ @@ -185,9 +188,12 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) } d.Set("publicly_accessible", db.PubliclyAccessible) + d.Set("cluster_identifier", db.DBClusterIdentifier) + d.Set("instance_class", db.DBInstanceClass) + d.Set("identifier", db.DBInstanceIdentifier) if len(db.DBParameterGroups) > 0 { - d.Set("parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName) + d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName) } // Fetch and save tags From b7e854fa8b11200feae17dae3de0c3a3873c0497 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 11 Jul 2016 15:39:34 -0500 Subject: [PATCH 0215/1238] provider/template: validate vars are primitives Closes #7160 by returning a proper error. 
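As a rough illustration of the user-facing effect (the data source contents below are made up for this example, not taken from the change itself), a `vars` map that directly references a list or map now fails validation with the new error, while primitive values keep working:

```
data "template_file" "init" {
  template = "${file("init.tpl")}"

  vars {
    # Direct reference to a list - now rejected at validate time with:
    #   vars: cannot contain non-primitives; bad keys: host_ips (list)
    host_ips = "${aws_instance.web.*.private_ip}"

    # Strings, numbers and booleans remain valid.
    region = "us-east-1"
  }
}
```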
--- .../template/datasource_template_file.go | 29 +++++++++++-- .../template/datasource_template_file_test.go | 43 +++++++++++++++++++ .../docs/providers/template/d/file.html.md | 4 +- 3 files changed, 71 insertions(+), 5 deletions(-) diff --git a/builtin/providers/template/datasource_template_file.go b/builtin/providers/template/datasource_template_file.go index 865a24e06..6a04ccfca 100644 --- a/builtin/providers/template/datasource_template_file.go +++ b/builtin/providers/template/datasource_template_file.go @@ -6,6 +6,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "github.com/hashicorp/hil" "github.com/hashicorp/hil/ast" @@ -49,10 +50,11 @@ func dataSourceFile() *schema.Resource { ConflictsWith: []string{"template"}, }, "vars": &schema.Schema{ - Type: schema.TypeMap, - Optional: true, - Default: make(map[string]interface{}), - Description: "variables to substitute", + Type: schema.TypeMap, + Optional: true, + Default: make(map[string]interface{}), + Description: "variables to substitute", + ValidateFunc: validateVarsAttribute, }, "rendered": &schema.Schema{ Type: schema.TypeString, @@ -156,3 +158,22 @@ func validateTemplateAttribute(v interface{}, key string) (ws []string, es []err return } + +func validateVarsAttribute(v interface{}, key string) (ws []string, es []error) { + // vars can only be primitives right now + var badVars []string + for k, v := range v.(map[string]interface{}) { + switch v.(type) { + case []interface{}: + badVars = append(badVars, fmt.Sprintf("%s (list)", k)) + case map[string]interface{}: + badVars = append(badVars, fmt.Sprintf("%s (map)", k)) + } + } + if len(badVars) > 0 { + es = append(es, fmt.Errorf( + "%s: cannot contain non-primitives; bad keys: %s", + key, strings.Join(badVars, ", "))) + } + return +} diff --git a/builtin/providers/template/datasource_template_file_test.go b/builtin/providers/template/datasource_template_file_test.go index 64a64102a..5e82382a6 100644 --- a/builtin/providers/template/datasource_template_file_test.go +++ b/builtin/providers/template/datasource_template_file_test.go @@ -71,6 +71,49 @@ func TestValidateTemplateAttribute(t *testing.T) { } } +func TestValidateVarsAttribute(t *testing.T) { + cases := map[string]struct { + Vars map[string]interface{} + ExpectErr string + }{ + "lists are invalid": { + map[string]interface{}{ + "list": []interface{}{}, + }, + `vars: cannot contain non-primitives`, + }, + "maps are invalid": { + map[string]interface{}{ + "map": map[string]interface{}{}, + }, + `vars: cannot contain non-primitives`, + }, + "strings, integers, floats, and bools are AOK": { + map[string]interface{}{ + "string": "foo", + "int": 1, + "bool": true, + "float": float64(1.0), + }, + ``, + }, + } + + for tn, tc := range cases { + _, es := validateVarsAttribute(tc.Vars, "vars") + if len(es) > 0 { + if tc.ExpectErr == "" { + t.Fatalf("%s: expected no err, got: %#v", tn, es) + } + if !strings.Contains(es[0].Error(), tc.ExpectErr) { + t.Fatalf("%s: expected\n%s\nto contain\n%s", tn, es[0], tc.ExpectErr) + } + } else if tc.ExpectErr != "" { + t.Fatalf("%s: expected err containing %q, got none!", tn, tc.ExpectErr) + } + } +} + // This test covers a panic due to config.Func formerly being a // shared map, causing multiple template_file resources to try and // accessing it parallel during their lang.Eval() runs. 
diff --git a/website/source/docs/providers/template/d/file.html.md b/website/source/docs/providers/template/d/file.html.md index 6b8381d61..b47f7e286 100644 --- a/website/source/docs/providers/template/d/file.html.md +++ b/website/source/docs/providers/template/d/file.html.md @@ -31,7 +31,9 @@ The following arguments are supported: from a file on disk using the [`file()` interpolation function](/docs/configuration/interpolation.html#file_path_). -* `vars` - (Optional) Variables for interpolation within the template. +* `vars` - (Optional) Variables for interpolation within the template. Note + that variables must all be primitives. Direct references to lists or maps + will cause a validation error. The following arguments are maintained for backwards compatibility and may be removed in a future version: From ceeab2ad12b67a05ee97e782848b81f0c570e1e2 Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 6 Jul 2016 11:21:47 +0100 Subject: [PATCH 0216/1238] provider/aws: Support Import for `aws_api_gateway_key` Previously, the `stage_key` were not being set back to state in the Read func. Changing this means the tests now run as follows: ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAPIGatewayApiKey_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAPIGatewayApiKey_ -timeout 120m === RUN TestAccAWSAPIGatewayApiKey_importBasic --- PASS: TestAccAWSAPIGatewayApiKey_importBasic (42.42s) === RUN TestAccAWSAPIGatewayApiKey_basic --- PASS: TestAccAWSAPIGatewayApiKey_basic (42.11s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 84.549s ``` --- .../aws/import_aws_api_gateway_key_test.go | 28 +++++++++++++++++ .../aws/resource_aws_api_gateway_api_key.go | 4 +++ builtin/providers/aws/structure.go | 18 +++++++++++ builtin/providers/aws/structure_test.go | 31 +++++++++++++++++++ 4 files changed, 81 insertions(+) create mode 100644 builtin/providers/aws/import_aws_api_gateway_key_test.go diff --git a/builtin/providers/aws/import_aws_api_gateway_key_test.go b/builtin/providers/aws/import_aws_api_gateway_key_test.go new file mode 100644 index 000000000..10d09345c --- /dev/null +++ b/builtin/providers/aws/import_aws_api_gateway_key_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSAPIGatewayApiKey_importBasic(t *testing.T) { + resourceName := "aws_api_gateway_api_key.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayApiKeyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAPIGatewayApiKeyConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_api_gateway_api_key.go b/builtin/providers/aws/resource_aws_api_gateway_api_key.go index ba1d9101c..604eb5d54 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_api_key.go +++ b/builtin/providers/aws/resource_aws_api_gateway_api_key.go @@ -18,6 +18,9 @@ func resourceAwsApiGatewayApiKey() *schema.Resource { Read: resourceAwsApiGatewayApiKeyRead, Update: resourceAwsApiGatewayApiKeyUpdate, Delete: resourceAwsApiGatewayApiKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ 
@@ -97,6 +100,7 @@ func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) e d.Set("name", apiKey.Name) d.Set("description", apiKey.Description) d.Set("enabled", apiKey.Enabled) + d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys)) return nil } diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index ec57c18b1..2cf42b6bd 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" + "log" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/aws/aws-sdk-go/service/autoscaling" @@ -25,6 +27,7 @@ import ( "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/route53" + "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/helper/schema" ) @@ -1005,6 +1008,21 @@ func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string { return strs } +func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} { + stageKeys := make([]map[string]interface{}, 0, len(keys)) + log.Printf("[INFO] THERE %s", spew.Sdump(keys)) + for _, o := range keys { + key := make(map[string]interface{}) + parts := strings.Split(*o, "/") + key["stage_name"] = parts[1] + key["rest_api_id"] = parts[0] + + stageKeys = append(stageKeys, key) + } + log.Printf("[INFO] HERE %s", spew.Sdump(stageKeys)) + return stageKeys +} + func expandApiGatewayStageKeys(d *schema.ResourceData) []*apigateway.StageKey { var stageKeys []*apigateway.StageKey diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index 63666eefc..fb36a8545 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -959,3 +959,34 @@ func TestFlattenApiGatewayThrottleSettings(t *testing.T) { t.Fatalf("Expected 'rate_limit' to equal %f, got %f", expectedRateLimit, rateLimitFloat) } } + +func TestFlattenApiGatewayStageKeys(t *testing.T) { + cases := []struct { + Input []*string + Output []map[string]interface{} + }{ + { + Input: []*string{ + aws.String("a1b2c3d4e5/dev"), + aws.String("e5d4c3b2a1/test"), + }, + Output: []map[string]interface{}{ + map[string]interface{}{ + "stage_name": "dev", + "rest_api_id": "a1b2c3d4e5", + }, + map[string]interface{}{ + "stage_name": "test", + "rest_api_id": "e5d4c3b2a1", + }, + }, + }, + } + + for _, tc := range cases { + output := flattenApiGatewayStageKeys(tc.Input) + if !reflect.DeepEqual(output, tc.Output) { + t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) + } + } +} From edd67547bc2aa12244fcf1620bcb209ee73e008e Mon Sep 17 00:00:00 2001 From: clint shryock Date: Mon, 11 Jul 2016 15:10:13 -0600 Subject: [PATCH 0217/1238] remove debug statements --- .../aws/import_aws_api_gateway_key_test.go | 36 ++++++------- .../aws/resource_aws_api_gateway_api_key.go | 8 +-- builtin/providers/aws/structure.go | 23 ++++---- builtin/providers/aws/structure_test.go | 54 +++++++++---------- 4 files changed, 58 insertions(+), 63 deletions(-) diff --git a/builtin/providers/aws/import_aws_api_gateway_key_test.go b/builtin/providers/aws/import_aws_api_gateway_key_test.go index 10d09345c..2fd3d4cef 100644 --- a/builtin/providers/aws/import_aws_api_gateway_key_test.go +++ b/builtin/providers/aws/import_aws_api_gateway_key_test.go @@ -1,28 +1,28 @@ package aws import ( - "testing" + "testing" - "github.com/hashicorp/terraform/helper/resource" + 
"github.com/hashicorp/terraform/helper/resource" ) func TestAccAWSAPIGatewayApiKey_importBasic(t *testing.T) { - resourceName := "aws_api_gateway_api_key.test" + resourceName := "aws_api_gateway_api_key.test" - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSAPIGatewayApiKeyDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSAPIGatewayApiKeyConfig, - }, + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAPIGatewayApiKeyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAPIGatewayApiKeyConfig, + }, - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) } diff --git a/builtin/providers/aws/resource_aws_api_gateway_api_key.go b/builtin/providers/aws/resource_aws_api_gateway_api_key.go index 604eb5d54..7ddd79087 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_api_key.go +++ b/builtin/providers/aws/resource_aws_api_gateway_api_key.go @@ -18,9 +18,9 @@ func resourceAwsApiGatewayApiKey() *schema.Resource { Read: resourceAwsApiGatewayApiKeyRead, Update: resourceAwsApiGatewayApiKeyUpdate, Delete: resourceAwsApiGatewayApiKeyDelete, - Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, - }, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -100,7 +100,7 @@ func resourceAwsApiGatewayApiKeyRead(d *schema.ResourceData, meta interface{}) e d.Set("name", apiKey.Name) d.Set("description", apiKey.Description) d.Set("enabled", apiKey.Enabled) - d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys)) + d.Set("stage_key", flattenApiGatewayStageKeys(apiKey.StageKeys)) return nil } diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 2cf42b6bd..41464a5d3 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -9,8 +9,6 @@ import ( "strconv" "strings" - "log" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/apigateway" "github.com/aws/aws-sdk-go/service/autoscaling" @@ -27,7 +25,6 @@ import ( "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/route53" - "github.com/davecgh/go-spew/spew" "github.com/hashicorp/terraform/helper/schema" ) @@ -1009,18 +1006,16 @@ func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string { } func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} { - stageKeys := make([]map[string]interface{}, 0, len(keys)) - log.Printf("[INFO] THERE %s", spew.Sdump(keys)) - for _, o := range keys { - key := make(map[string]interface{}) - parts := strings.Split(*o, "/") - key["stage_name"] = parts[1] - key["rest_api_id"] = parts[0] + stageKeys := make([]map[string]interface{}, 0, len(keys)) + for _, o := range keys { + key := make(map[string]interface{}) + parts := strings.Split(*o, "/") + key["stage_name"] = parts[1] + key["rest_api_id"] = parts[0] - stageKeys = append(stageKeys, key) - } - log.Printf("[INFO] HERE %s", spew.Sdump(stageKeys)) - return stageKeys + stageKeys = append(stageKeys, key) + } + return stageKeys } func expandApiGatewayStageKeys(d 
*schema.ResourceData) []*apigateway.StageKey { diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index fb36a8545..0ac0a73dc 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -961,32 +961,32 @@ func TestFlattenApiGatewayThrottleSettings(t *testing.T) { } func TestFlattenApiGatewayStageKeys(t *testing.T) { - cases := []struct { - Input []*string - Output []map[string]interface{} - }{ - { - Input: []*string{ - aws.String("a1b2c3d4e5/dev"), - aws.String("e5d4c3b2a1/test"), - }, - Output: []map[string]interface{}{ - map[string]interface{}{ - "stage_name": "dev", - "rest_api_id": "a1b2c3d4e5", - }, - map[string]interface{}{ - "stage_name": "test", - "rest_api_id": "e5d4c3b2a1", - }, - }, - }, - } + cases := []struct { + Input []*string + Output []map[string]interface{} + }{ + { + Input: []*string{ + aws.String("a1b2c3d4e5/dev"), + aws.String("e5d4c3b2a1/test"), + }, + Output: []map[string]interface{}{ + map[string]interface{}{ + "stage_name": "dev", + "rest_api_id": "a1b2c3d4e5", + }, + map[string]interface{}{ + "stage_name": "test", + "rest_api_id": "e5d4c3b2a1", + }, + }, + }, + } - for _, tc := range cases { - output := flattenApiGatewayStageKeys(tc.Input) - if !reflect.DeepEqual(output, tc.Output) { - t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) - } - } + for _, tc := range cases { + output := flattenApiGatewayStageKeys(tc.Input) + if !reflect.DeepEqual(output, tc.Output) { + t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) + } + } } From d5a941a0a48967f264728a5aa4c1e7fd6c87fa36 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 11 Jul 2016 16:35:01 -0500 Subject: [PATCH 0218/1238] command: Do not count data sources in plan totals Fixes #7483 --- command/hook_count.go | 6 ++++++ command/hook_count_test.go | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/command/hook_count.go b/command/hook_count.go index 5f3f514c2..3e642b254 100644 --- a/command/hook_count.go +++ b/command/hook_count.go @@ -1,6 +1,7 @@ package command import ( + "strings" "sync" "github.com/hashicorp/terraform/terraform" @@ -90,6 +91,11 @@ func (h *CountHook) PostDiff( h.Lock() defer h.Unlock() + // We don't count anything for data sources + if strings.HasPrefix(n.Id, "data.") { + return terraform.HookActionContinue, nil + } + switch d.ChangeType() { case terraform.DiffDestroyCreate: h.ToRemoveAndAdd += 1 diff --git a/command/hook_count_test.go b/command/hook_count_test.go index deb822699..5f0b000e8 100644 --- a/command/hook_count_test.go +++ b/command/hook_count_test.go @@ -182,3 +182,37 @@ func TestCountHookPostDiff_NoChange(t *testing.T) { expected, h) } } + +func TestCountHookPostDiff_DataSource(t *testing.T) { + h := new(CountHook) + + resources := map[string]*terraform.InstanceDiff{ + "data.foo": &terraform.InstanceDiff{ + Destroy: true, + }, + "data.bar": &terraform.InstanceDiff{}, + "data.lorem": &terraform.InstanceDiff{ + Destroy: false, + Attributes: map[string]*terraform.ResourceAttrDiff{ + "foo": &terraform.ResourceAttrDiff{}, + }, + }, + "data.ipsum": &terraform.InstanceDiff{Destroy: true}, + } + + for k, d := range resources { + n := &terraform.InstanceInfo{Id: k} + h.PostDiff(n, d) + } + + expected := new(CountHook) + expected.ToAdd = 0 + expected.ToChange = 0 + expected.ToRemoveAndAdd = 0 + expected.ToRemove = 0 + + if !reflect.DeepEqual(expected, h) { + t.Fatalf("Expected %#v, got %#v instead.", + expected, h) + } +} From 
a0f8e7bd04a7fedd3a8435ca5ca12967e9952872 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Sat, 9 Jul 2016 21:33:12 +0100 Subject: [PATCH 0219/1238] deps: Update github.com/hashicorp/hil --- vendor/github.com/hashicorp/hil/convert.go | 60 ++++++++++++++++++++++ vendor/github.com/hashicorp/hil/eval.go | 44 +++++----------- vendor/vendor.json | 6 +-- 3 files changed, 76 insertions(+), 34 deletions(-) diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go index 738e719ff..b7bff7544 100644 --- a/vendor/github.com/hashicorp/hil/convert.go +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -42,6 +42,10 @@ func hilMapstructureWeakDecode(m interface{}, rawVal interface{}) error { } func InterfaceToVariable(input interface{}) (ast.Variable, error) { + if inputVariable, ok := input.(ast.Variable); ok { + return inputVariable, nil + } + var stringVal string if err := hilMapstructureWeakDecode(input, &stringVal); err == nil { return ast.Variable{ @@ -86,3 +90,59 @@ func InterfaceToVariable(input interface{}) (ast.Variable, error) { return ast.Variable{}, fmt.Errorf("value for conversion must be a string, interface{} or map[string]interface: got %T", input) } + +func VariableToInterface(input ast.Variable) (interface{}, error) { + if input.Type == ast.TypeString { + if inputStr, ok := input.Value.(string); ok { + return inputStr, nil + } else { + return nil, fmt.Errorf("ast.Variable with type string has value which is not a string") + } + } + + if input.Type == ast.TypeList { + inputList, ok := input.Value.([]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type list has value which is not a []ast.Variable") + } + + result := make([]interface{}, 0) + if len(inputList) == 0 { + return result, nil + } + + for _, element := range inputList { + if convertedElement, err := VariableToInterface(element); err == nil { + result = append(result, convertedElement) + } else { + return nil, err + } + } + + return result, nil + } + + if input.Type == ast.TypeMap { + inputMap, ok := input.Value.(map[string]ast.Variable) + if !ok { + return nil, fmt.Errorf("ast.Variable with type map has value which is not a map[string]ast.Variable") + } + + result := make(map[string]interface{}, 0) + if len(inputMap) == 0 { + return result, nil + } + + for key, value := range inputMap { + if convertedValue, err := VariableToInterface(value); err == nil { + result[key] = convertedValue + } else { + return nil, err + } + } + + return result, nil + } + + return nil, fmt.Errorf("Find") +} diff --git a/vendor/github.com/hashicorp/hil/eval.go b/vendor/github.com/hashicorp/hil/eval.go index f5537312e..173c67f5a 100644 --- a/vendor/github.com/hashicorp/hil/eval.go +++ b/vendor/github.com/hashicorp/hil/eval.go @@ -63,15 +63,23 @@ func Eval(root ast.Node, config *EvalConfig) (EvaluationResult, error) { switch outputType { case ast.TypeList: + val, err := VariableToInterface(ast.Variable{ + Type: ast.TypeList, + Value: output, + }) return EvaluationResult{ Type: TypeList, - Value: hilListToGoSlice(output.([]ast.Variable)), - }, nil + Value: val, + }, err case ast.TypeMap: + val, err := VariableToInterface(ast.Variable{ + Type: ast.TypeMap, + Value: output, + }) return EvaluationResult{ - Type: TypeMap, - Value: hilMapToGoMap(output.(map[string]ast.Variable)), - }, nil + Type: TypeMap, + Value: val, + }, err case ast.TypeString: return EvaluationResult{ Type: TypeString, @@ -337,32 +345,6 @@ func (v *evalIndex) evalMapIndex(variableName string, target interface{}, key in return 
value.Value, value.Type, nil } -// hilListToGoSlice converts an ast.Variable into a []interface{}. We assume that -// the type checking is already done since this is internal and only used in output -// evaluation. -func hilListToGoSlice(variable []ast.Variable) []interface{} { - output := make([]interface{}, len(variable)) - - for index, element := range variable { - output[index] = element.Value - } - - return output -} - -// hilMapToGoMap converts an ast.Variable into a map[string]interface{}. We assume -// that the type checking is already done since this is internal and only used in -// output evaluation. -func hilMapToGoMap(variable map[string]ast.Variable) map[string]interface{} { - output := make(map[string]interface{}) - - for key, element := range variable { - output[key] = element.Value - } - - return output -} - type evalOutput struct{ *ast.Output } func (v *evalOutput) Eval(s ast.Scope, stack *ast.Stack) (interface{}, ast.Type, error) { diff --git a/vendor/vendor.json b/vendor/vendor.json index d9635eb2a..939ab76c6 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -890,10 +890,10 @@ "revisionTime": "2016-06-24T12:12:30Z" }, { - "checksumSHA1": "vWW3HXm7OTOMISuZPcCSJODRYkU=", + "checksumSHA1": "o5JhQCQpoSRFcMwD8LxqP8iJ04o=", "path": "github.com/hashicorp/hil", - "revision": "7130f7330953adacbfb4ca0ad4b14b806bce3762", - "revisionTime": "2016-06-12T11:49:46Z" + "revision": "79fc9230647576201673b35c724c58ec034bd21d", + "revisionTime": "2016-07-11T16:29:56Z" }, { "checksumSHA1": "UICubs001+Q4MsUf9zl2vcMzWQQ=", From d955c5191cc970bf24266a23e610e70229650958 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 11 Jul 2016 11:40:21 -0600 Subject: [PATCH 0220/1238] core: Fix interpolation tests with nested lists Some of the tests for splat syntax were from the pre-list-and-map world, and effectively flattened the values if interpolating a resource value which was itself a list. We now set the expected values correctly so that an interpolation like `aws_instance.test.*.security_group_ids` now returns a list of lists. We also fix the implementation to correctly deal with maps. 
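To make the new behaviour concrete (the resource and attribute names are purely illustrative), with `count = 2` the splat of a list-valued attribute now evaluates to one list per instance rather than a single flattened list:

```
# Evaluates to a list of lists, e.g. [["sg-111", "sg-222"], ["sg-333"]],
# where each element holds the security group IDs of one instance.
output "all_security_group_ids" {
  value = "${aws_instance.test.*.security_group_ids}"
}

# Indexing a single instance still yields only that instance's list.
output "first_security_group_ids" {
  value = "${aws_instance.test.0.security_group_ids}"
}
```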
--- terraform/interpolate.go | 21 ++++++++------------- terraform/interpolate_test.go | 8 ++++---- 2 files changed, 12 insertions(+), 17 deletions(-) diff --git a/terraform/interpolate.go b/terraform/interpolate.go index 870fac762..b42ca8211 100644 --- a/terraform/interpolate.go +++ b/terraform/interpolate.go @@ -493,7 +493,8 @@ func (i *Interpolater) computeResourceMultiVariable( if module == nil || len(module.Resources) == 0 { return &unknownVariable, nil } - var values []string + + var values []interface{} for j := 0; j < count; j++ { id := fmt.Sprintf("%s.%d", v.ResourceId(), j) @@ -521,9 +522,10 @@ func (i *Interpolater) computeResourceMultiVariable( continue } - // computed list attribute - _, ok = r.Primary.Attributes[v.Field+".#"] - if !ok { + // computed list or map attribute + _, isList := r.Primary.Attributes[v.Field+".#"] + _, isMap := r.Primary.Attributes[v.Field+".%"] + if !(isList || isMap) { continue } multiAttr, err := i.interpolateComplexTypeAttribute(v.Field, r.Primary.Attributes) @@ -535,14 +537,7 @@ func (i *Interpolater) computeResourceMultiVariable( return &ast.Variable{Type: ast.TypeString, Value: ""}, nil } - for _, element := range multiAttr.Value.([]ast.Variable) { - strVal := element.Value.(string) - if strVal == config.UnknownVariableValue { - return &unknownVariable, nil - } - - values = append(values, strVal) - } + values = append(values, multiAttr) } if len(values) == 0 { @@ -595,7 +590,7 @@ func (i *Interpolater) interpolateComplexTypeAttribute( keys := make([]string, 0) listElementKey := regexp.MustCompile("^" + resourceID + "\\.[0-9]+$") - for id, _ := range attributes { + for id := range attributes { if listElementKey.MatchString(id) { keys = append(keys, id) } diff --git a/terraform/interpolate_test.go b/terraform/interpolate_test.go index 8012e16af..65d716c2e 100644 --- a/terraform/interpolate_test.go +++ b/terraform/interpolate_test.go @@ -449,11 +449,11 @@ func TestInterpolator_resourceMultiAttributesWithResourceCount(t *testing.T) { // More than 1 element testInterpolate(t, i, scope, "aws_route53_zone.terra.0.name_servers", - interfaceToVariableSwallowError(name_servers[0:4])) + interfaceToVariableSwallowError(name_servers[:4])) // More than 1 element in both testInterpolate(t, i, scope, "aws_route53_zone.terra.*.name_servers", - interfaceToVariableSwallowError(name_servers)) + interfaceToVariableSwallowError([]interface{}{name_servers[:4], name_servers[4:]})) // Exactly 1 element testInterpolate(t, i, scope, "aws_route53_zone.terra.0.listeners", @@ -461,7 +461,7 @@ func TestInterpolator_resourceMultiAttributesWithResourceCount(t *testing.T) { // Exactly 1 element in both testInterpolate(t, i, scope, "aws_route53_zone.terra.*.listeners", - interfaceToVariableSwallowError([]interface{}{"red", "blue"})) + interfaceToVariableSwallowError([]interface{}{[]interface{}{"red"}, []interface{}{"blue"}})) // Zero elements testInterpolate(t, i, scope, "aws_route53_zone.terra.0.nothing", @@ -469,7 +469,7 @@ func TestInterpolator_resourceMultiAttributesWithResourceCount(t *testing.T) { // Zero + 1 element testInterpolate(t, i, scope, "aws_route53_zone.terra.*.special", - interfaceToVariableSwallowError([]interface{}{"extra"})) + interfaceToVariableSwallowError([]interface{}{[]interface{}{"extra"}})) // Maps still need to work testInterpolate(t, i, scope, "aws_route53_zone.terra.0.tags.Name", ast.Variable{ From f5ef8f36c2ae0fe2fb56729f3336ec2cb8ef3baf Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 12 Jul 2016 00:02:47 +0100 Subject: [PATCH 0221/1238] 
provider/aws: Allow import of efs_file_system --- .../aws/import_aws_efs_file_system_test.go | 29 +++++++++++++++++++ .../aws/resource_aws_efs_file_system.go | 4 +++ 2 files changed, 33 insertions(+) create mode 100644 builtin/providers/aws/import_aws_efs_file_system_test.go diff --git a/builtin/providers/aws/import_aws_efs_file_system_test.go b/builtin/providers/aws/import_aws_efs_file_system_test.go new file mode 100644 index 000000000..b40e7f496 --- /dev/null +++ b/builtin/providers/aws/import_aws_efs_file_system_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSEFSFileSystem_importBasic(t *testing.T) { + resourceName := "aws_efs_file_system.foo-with-tags" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckEfsFileSystemDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEFSFileSystemConfigWithTags, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"reference_name"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_efs_file_system.go b/builtin/providers/aws/resource_aws_efs_file_system.go index 90a34143d..28f451756 100644 --- a/builtin/providers/aws/resource_aws_efs_file_system.go +++ b/builtin/providers/aws/resource_aws_efs_file_system.go @@ -19,6 +19,10 @@ func resourceAwsEfsFileSystem() *schema.Resource { Update: resourceAwsEfsFileSystemUpdate, Delete: resourceAwsEfsFileSystemDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "reference_name": &schema.Schema{ Type: schema.TypeString, From 86c16612053aa4d063cacb15ae5bb01c14fbb505 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 12 Jul 2016 00:12:10 +0100 Subject: [PATCH 0222/1238] provider/aws: Allow import of efs_mount_target --- .../aws/import_aws_efs_mount_target_test.go | 28 +++++++++++++++++++ .../aws/resource_aws_efs_mount_target.go | 4 +++ 2 files changed, 32 insertions(+) create mode 100644 builtin/providers/aws/import_aws_efs_mount_target_test.go diff --git a/builtin/providers/aws/import_aws_efs_mount_target_test.go b/builtin/providers/aws/import_aws_efs_mount_target_test.go new file mode 100644 index 000000000..f27175f5c --- /dev/null +++ b/builtin/providers/aws/import_aws_efs_mount_target_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSEFSMountTarget_importBasic(t *testing.T) { + resourceName := "aws_efs_mount_target.alpha" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckEfsMountTargetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEFSMountTargetConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_efs_mount_target.go b/builtin/providers/aws/resource_aws_efs_mount_target.go index de15ac760..34597230e 100644 --- a/builtin/providers/aws/resource_aws_efs_mount_target.go +++ b/builtin/providers/aws/resource_aws_efs_mount_target.go @@ -20,6 +20,10 @@ func resourceAwsEfsMountTarget() *schema.Resource { Update: resourceAwsEfsMountTargetUpdate, Delete: resourceAwsEfsMountTargetDelete, + Importer: 
&schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "file_system_id": &schema.Schema{ Type: schema.TypeString, From 8cb2a482a851cab954a76e39cbe7ff3ff6ae9c47 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 11 Jul 2016 17:23:09 -0600 Subject: [PATCH 0223/1238] provider/triton: Update base packages in tests (#7593) --- builtin/providers/triton/resource_machine_test.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/builtin/providers/triton/resource_machine_test.go b/builtin/providers/triton/resource_machine_test.go index dd9ba7a00..78f3ec290 100644 --- a/builtin/providers/triton/resource_machine_test.go +++ b/builtin/providers/triton/resource_machine_test.go @@ -294,7 +294,7 @@ provider "triton" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" tags = { @@ -310,7 +310,7 @@ provider "triton" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" firewall_enabled = 0 @@ -323,7 +323,7 @@ provider "triton" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" firewall_enabled = 1 @@ -337,7 +337,7 @@ provider "triton" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "c20b4b7c-e1a6-11e5-9a4d-ef590901732e" user_data = "hello" @@ -364,7 +364,7 @@ resource "triton_fabric" "test" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "842e6fa6-6e9b-11e5-8402-1b490459e334" tags = { @@ -391,7 +391,7 @@ resource "triton_fabric" "test" { resource "triton_machine" "test" { name = "%s" - package = "g3-standard-0.25-smartos" + package = "g4-general-4G" image = "842e6fa6-6e9b-11e5-8402-1b490459e334" tags = { From 4f3cc7e31444d5c0fad1a9c88a1577c1201e80f1 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 11 Jul 2016 17:24:27 -0600 Subject: [PATCH 0224/1238] deps: Update github.com/hashicorp/hcl --- .../hashicorp/hcl/hcl/scanner/scanner.go | 2 +- .../hashicorp/hcl/hcl/strconv/quote.go | 3 -- vendor/vendor.json | 48 +++++++++---------- 3 files changed, 25 insertions(+), 28 deletions(-) diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go index 174119a8d..b20416539 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go +++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go @@ -469,7 +469,7 @@ func (s *Scanner) scanString() { // read character after quote ch := s.next() - if ch == '\n' || ch < 0 || ch == eof { + if ch < 0 || ch == eof { s.err("literal not terminated") return } diff --git a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go index 74e232e15..956c8991c 100644 --- a/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go +++ b/vendor/github.com/hashicorp/hcl/hcl/strconv/quote.go @@ -27,9 +27,6 @@ func Unquote(s string) (t string, err error) { if quote != '"' { return "", ErrSyntax } - if contains(s, '\n') { - return "", ErrSyntax - } // Is it trivial? Avoid allocation. 
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') { diff --git a/vendor/vendor.json b/vendor/vendor.json index 023364c2c..0c0f019e6 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -828,68 +828,68 @@ { "checksumSHA1": "ydHBPi04mEh+Tir+2JkpSIMckcw=", "path": "github.com/hashicorp/hcl", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "IxyvRpCFeoJBGl2obLKJV7RCGjg=", "path": "github.com/hashicorp/hcl/hcl/ast", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "5HVecyfmcTm6OTffEi6LGayQf5M=", "path": "github.com/hashicorp/hcl/hcl/fmtcmd", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "l2oQxBsZRwn6eZjf+whXr8c9+8c=", "path": "github.com/hashicorp/hcl/hcl/parser", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "CSmwxPOTz7GSpnWPF9aGkbVeR64=", "path": "github.com/hashicorp/hcl/hcl/printer", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { - "checksumSHA1": "FHZ1IXjWHUyuMjy/wQChE4pSoPg=", + "checksumSHA1": "vjhDQVlgHhdxml1V8/cj0vOe+j8=", "path": "github.com/hashicorp/hcl/hcl/scanner", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { - "checksumSHA1": "riN5acfVDm4j6LhWXauqiWH5n84=", + "checksumSHA1": "JlZmnzqdmFFyb1+2afLyR3BOE/8=", "path": "github.com/hashicorp/hcl/hcl/strconv", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "c6yprzj06ASwCo18TtbbNNBHljA=", "path": "github.com/hashicorp/hcl/hcl/token", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "jQ45CCc1ed/nlV7bbSnx6z72q1M=", "path": "github.com/hashicorp/hcl/json/parser", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "S1e0F9ZKSnqgOLfjDTYazRL28tA=", "path": "github.com/hashicorp/hcl/json/scanner", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": "d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "fNlXQCQEnb+B3k5UDL/r15xtSJY=", "path": "github.com/hashicorp/hcl/json/token", - "revision": "364df430845abef160a0bfb3a59979f746bf4956", - "revisionTime": "2016-07-08T14:13:38Z" + "revision": 
"d8c773c4cba11b11539e3d45f93daeaa5dcf1fa1", + "revisionTime": "2016-07-11T23:17:52Z" }, { "checksumSHA1": "o5JhQCQpoSRFcMwD8LxqP8iJ04o=", From 1a999926fef5a502d9d8c43dcc42fa8d91526a25 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 11 Jul 2016 17:28:11 -0600 Subject: [PATCH 0225/1238] deps: Update github.com/hashicorp/hil --- vendor/github.com/hashicorp/hil/convert.go | 2 +- vendor/vendor.json | 10 +++++----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/vendor/github.com/hashicorp/hil/convert.go b/vendor/github.com/hashicorp/hil/convert.go index b7bff7544..3841d1fb3 100644 --- a/vendor/github.com/hashicorp/hil/convert.go +++ b/vendor/github.com/hashicorp/hil/convert.go @@ -144,5 +144,5 @@ func VariableToInterface(input ast.Variable) (interface{}, error) { return result, nil } - return nil, fmt.Errorf("Find") + return nil, fmt.Errorf("unknown input type: %s", input.Type) } diff --git a/vendor/vendor.json b/vendor/vendor.json index 0c0f019e6..a4347ea1b 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -892,16 +892,16 @@ "revisionTime": "2016-07-11T23:17:52Z" }, { - "checksumSHA1": "o5JhQCQpoSRFcMwD8LxqP8iJ04o=", + "checksumSHA1": "kqCMCHy2b+RBMKC+ER+OPqp8C3E=", "path": "github.com/hashicorp/hil", - "revision": "79fc9230647576201673b35c724c58ec034bd21d", - "revisionTime": "2016-07-11T16:29:56Z" + "revision": "1e86c6b523c55d1fa6c6e930ce80b548664c95c2", + "revisionTime": "2016-07-11T23:18:37Z" }, { "checksumSHA1": "UICubs001+Q4MsUf9zl2vcMzWQQ=", "path": "github.com/hashicorp/hil/ast", - "revision": "7130f7330953adacbfb4ca0ad4b14b806bce3762", - "revisionTime": "2016-06-12T11:49:46Z" + "revision": "1e86c6b523c55d1fa6c6e930ce80b548664c95c2", + "revisionTime": "2016-07-11T23:18:37Z" }, { "path": "github.com/hashicorp/logutils", From 40e05ecc33825b37c2c76fbbe63a69fd7d7eeb93 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 12 Jul 2016 08:49:26 +0100 Subject: [PATCH 0226/1238] provider/aws: Allow import of glacier_vault (#7596) --- .../aws/import_aws_glacier_vault_test.go | 28 +++++++++++++++++++ .../aws/resource_aws_glacier_vault.go | 22 ++++++++++++++- 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/aws/import_aws_glacier_vault_test.go diff --git a/builtin/providers/aws/import_aws_glacier_vault_test.go b/builtin/providers/aws/import_aws_glacier_vault_test.go new file mode 100644 index 000000000..e5fd5aa5b --- /dev/null +++ b/builtin/providers/aws/import_aws_glacier_vault_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSGlacierVault_importBasic(t *testing.T) { + resourceName := "aws_glacier_vault.full" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckGlacierVaultDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccGlacierVault_full, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_glacier_vault.go b/builtin/providers/aws/resource_aws_glacier_vault.go index 21ac4d7cc..99332f6e0 100644 --- a/builtin/providers/aws/resource_aws_glacier_vault.go +++ b/builtin/providers/aws/resource_aws_glacier_vault.go @@ -1,6 +1,7 @@ package aws import ( + "errors" "fmt" "log" "regexp" @@ -19,6 +20,10 @@ func resourceAwsGlacierVault() *schema.Resource { Update: resourceAwsGlacierVaultUpdate, Delete: 
resourceAwsGlacierVaultDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "name": &schema.Schema{ Type: schema.TypeString, @@ -130,7 +135,15 @@ func resourceAwsGlacierVaultRead(d *schema.ResourceData, meta interface{}) error return fmt.Errorf("Error reading Glacier Vault: %s", err.Error()) } - d.Set("arn", *out.VaultARN) + awsClient := meta.(*AWSClient) + d.Set("name", out.VaultName) + d.Set("arn", out.VaultARN) + + location, err := buildGlacierVaultLocation(awsClient.accountid, d.Id()) + if err != nil { + return err + } + d.Set("location", location) tags, err := getGlacierVaultTags(glacierconn, d.Id()) if err != nil { @@ -366,6 +379,13 @@ func glacierPointersToStringList(pointers []*string) []interface{} { return list } +func buildGlacierVaultLocation(accountId, vaultName string) (string, error) { + if accountId == "" { + return "", errors.New("AWS account ID unavailable - failed to construct Vault location") + } + return fmt.Sprintf("/" + accountId + "/vaults/" + vaultName), nil +} + func getGlacierVaultNotification(glacierconn *glacier.Glacier, vaultName string) ([]map[string]interface{}, error) { request := &glacier.GetVaultNotificationsInput{ VaultName: aws.String(vaultName), From 8fa75ea38322b897800b11bfb6347c579fae7def Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Tue, 12 Jul 2016 08:49:33 +0100 Subject: [PATCH 0227/1238] provider/aws: Allow import of KMS key (#7599) --- .../providers/aws/import_aws_kms_key_test.go | 29 +++++++++++++++++++ builtin/providers/aws/resource_aws_kms_key.go | 4 +++ 2 files changed, 33 insertions(+) create mode 100644 builtin/providers/aws/import_aws_kms_key_test.go diff --git a/builtin/providers/aws/import_aws_kms_key_test.go b/builtin/providers/aws/import_aws_kms_key_test.go new file mode 100644 index 000000000..ba809a5dd --- /dev/null +++ b/builtin/providers/aws/import_aws_kms_key_test.go @@ -0,0 +1,29 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSKMSKey_importBasic(t *testing.T) { + resourceName := "aws_kms_key.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSKmsKeyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSKmsKey, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"deletion_window_in_days"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_kms_key.go b/builtin/providers/aws/resource_aws_kms_key.go index c096216ee..052dcadaa 100644 --- a/builtin/providers/aws/resource_aws_kms_key.go +++ b/builtin/providers/aws/resource_aws_kms_key.go @@ -19,6 +19,10 @@ func resourceAwsKmsKey() *schema.Resource { Update: resourceAwsKmsKeyUpdate, Delete: resourceAwsKmsKeyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ "arn": &schema.Schema{ Type: schema.TypeString, From 71c694cc5f362cafeb91e583b5bb9e1db5d753d2 Mon Sep 17 00:00:00 2001 From: dkalleg Date: Tue, 12 Jul 2016 01:02:41 -0700 Subject: [PATCH 0228/1238] Additional SCSI controller types support (#7525) This allows the user to specify new controller types. Before when specifying 'scsi', govmomi defaults to lsilogic-parallel. This patch allows the user to now specify 'scsi-lsi-parallel', 'scsi-buslogic', scsi-paravirtual', and 'scsi-lsi-sas'. 
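A minimal configuration sketch of the new values (every name and size below is made up, and the surrounding arguments are an assumed skeleton rather than a tested config): a disk's `controller_type` can now name a specific SCSI variant instead of the generic "scsi":

```
resource "vsphere_virtual_machine" "web" {
  name   = "web-01"
  vcpu   = 2
  memory = 4096

  network_interface {
    label = "VM Network"
  }

  disk {
    size = 20
    # Any of: scsi, scsi-lsi-parallel, scsi-buslogic, scsi-paravirtual,
    # scsi-lsi-sas, ide
    controller_type = "scsi-paravirtual"
  }
}
```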
Resolves issue https://github.com/hashicorp/terraform/issues/7202 --- .../resource_vsphere_virtual_machine.go | 71 ++++++++++++++++--- 1 file changed, 63 insertions(+), 8 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 4998d63a4..cf480eaa0 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -26,6 +26,15 @@ var DefaultDNSServers = []string{ "8.8.4.4", } +var DiskControllerTypes = []string{ + "scsi", + "scsi-lsi-parallel", + "scsi-buslogic", + "scsi-paravirtual", + "scsi-lsi-sas", + "ide", +} + type networkInterface struct { deviceName string label string @@ -421,9 +430,15 @@ func resourceVSphereVirtualMachine() *schema.Resource { Default: "scsi", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "scsi" && value != "ide" { + found := false + for _, t := range DiskControllerTypes { + if t == value { + found = true + } + } + if !found { errors = append(errors, fmt.Errorf( - "only 'scsi' and 'ide' are supported values for 'controller_type'")) + "Supported values for 'controller_type' are %v", strings.Join(DiskControllerTypes, ", "))) } return }, @@ -1160,8 +1175,24 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d log.Printf("[DEBUG] vm devices: %#v\n", devices) var controller types.BaseVirtualController - controller, err = devices.FindDiskController(controller_type) - if err != nil { + switch controller_type { + case "scsi": + controller, err = devices.FindDiskController(controller_type) + case "scsi-lsi-parallel": + controller = devices.PickController(&types.VirtualLsiLogicController{}) + case "scsi-buslogic": + controller = devices.PickController(&types.VirtualBusLogicController{}) + case "scsi-paravirtual": + controller = devices.PickController(&types.ParaVirtualSCSIController{}) + case "scsi-lsi-sas": + controller = devices.PickController(&types.VirtualLsiLogicSASController{}) + case "ide": + controller, err = devices.FindDiskController(controller_type) + default: + return fmt.Errorf("[ERROR] Unsupported disk controller provided: %v", controller_type) + } + + if err != nil || controller == nil { log.Printf("[DEBUG] Couldn't find a %v controller. 
Creating one..", controller_type) var c types.BaseVirtualDevice @@ -1172,6 +1203,30 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d if err != nil { return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) } + case "scsi-lsi-parallel": + // Create scsi controller + c, err = devices.CreateSCSIController("lsilogic") + if err != nil { + return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) + } + case "scsi-buslogic": + // Create scsi controller + c, err = devices.CreateSCSIController("buslogic") + if err != nil { + return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) + } + case "scsi-paravirtual": + // Create scsi controller + c, err = devices.CreateSCSIController("pvscsi") + if err != nil { + return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) + } + case "scsi-lsi-sas": + // Create scsi controller + c, err = devices.CreateSCSIController("lsilogic-sas") + if err != nil { + return fmt.Errorf("[ERROR] Failed creating SCSI controller: %v", err) + } case "ide": // Create ide controller c, err = devices.CreateIDEController() @@ -1188,10 +1243,10 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d if err != nil { return err } - controller, err = devices.FindDiskController(controller_type) - if err != nil { - log.Printf("[ERROR] Could not find the new %v controller: %v", controller_type, err) - return err + controller = devices.PickController(c.(types.BaseVirtualController)) + if controller == nil { + log.Printf("[ERROR] Could not find the new %v controller", controller_type) + return fmt.Errorf("Could not find the new %v controller", controller_type) } } From 2f3f1daff478e03c44e09afd09e7c6c15cae5d79 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 12 Jul 2016 09:03:46 +0100 Subject: [PATCH 0229/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 317f14c42..a51f7c937 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -139,6 +139,7 @@ IMPROVEMENTS: * provider/vsphere: Virtual Machine and File resources handle Read errors properley [GH-7220] * provider/vsphere: set uuid as `vsphere_virtual_machine` output [GH-4382] * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` [GH-7169] + * provider/vsphere: Add support for additional `vsphere_virtial_machine` SCSI controller types [GH-7525] * provisioner/file: File provisioners may now have file content set as an attribute [GH-7561] BUG FIXES: From 9cbdc809378750097a77a70953fd5e6163361c2e Mon Sep 17 00:00:00 2001 From: dkalleg Date: Tue, 12 Jul 2016 01:05:02 -0700 Subject: [PATCH 0230/1238] vSphere Provider - Fix destroy when vm is powered off or has networks (#7206) This patch adds a wait when powering on a vm so setupVirtualMachine does not return until the vm is actually powered on. This allows other functions to work off the assumption that the current state of the vm is not in flux. During resourceVSphereVirtualMachineRead(), the wait for IP would cause a hang for any VM with no network interfaces or for vms that had been powered off for any reason. This also means that the user could not delete a vm with no network interfaces or that is powered off. Checking power state before trying to check for network interfaces. 
Resolves https://github.com/hashicorp/terraform/issues/7168 --- .../resource_vsphere_virtual_machine.go | 38 +++++++++++-------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index cf480eaa0..a946517d7 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -638,12 +638,6 @@ func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{ } } - ip, err := vm.WaitForIP(context.TODO()) - if err != nil { - return err - } - log.Printf("[DEBUG] ip address: %v", ip) - return resourceVSphereVirtualMachineRead(d, meta) } @@ -916,14 +910,20 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) return nil } - var mvm mo.VirtualMachine - - // wait for interfaces to appear - _, err = vm.WaitForNetIP(context.TODO(), true) + state, err := vm.PowerState(context.TODO()) if err != nil { return err } + if state == types.VirtualMachinePowerStatePoweredOn { + // wait for interfaces to appear + _, err = vm.WaitForNetIP(context.TODO(), true) + if err != nil { + return err + } + } + + var mvm mo.VirtualMachine collector := property.DefaultCollector(client.Client) if err := collector.RetrieveOne(context.TODO(), vm.Reference(), []string{"guest", "summary", "datastore", "config"}, &mvm); err != nil { return err @@ -1059,11 +1059,15 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Invalid network interfaces to set: %#v", networkInterfaces) } - log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) - d.SetConnInfo(map[string]string{ - "type": "ssh", - "host": networkInterfaces[0]["ipv4_address"].(string), - }) + if len(networkInterfaces) > 0 { + if _, ok := networkInterfaces[0]["ipv4_address"]; ok { + log.Printf("[DEBUG] ip address: %v", networkInterfaces[0]["ipv4_address"].(string)) + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": networkInterfaces[0]["ipv4_address"].(string), + }) + } + } var rootDatastore string for _, v := range mvm.Datastore { @@ -1989,6 +1993,10 @@ func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { if vm.hasBootableVmdk || vm.template != "" { newVM.PowerOn(context.TODO()) + err = newVM.WaitForPowerState(context.TODO(), types.VirtualMachinePowerStatePoweredOn) + if err != nil { + return err + } } return nil } From c990625516892bb039010eeec8a28e7c236bd1d4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 12 Jul 2016 09:06:02 +0100 Subject: [PATCH 0231/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a51f7c937..e6eb38250 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -212,6 +212,7 @@ BUG FIXES: * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources [GH-7275] * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional [GH-7410] * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller [GH-7167] + * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete [GH-7206] * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs [GH-7413] ## 0.6.16 (May 9, 2016) From 303ba86cf6c4534a68914dd3ad226d67dad1a4e3 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 12 Jul 2016 
09:07:25 +0100 Subject: [PATCH 0232/1238] provider/aws: Allow `port` on `aws_db_instance` to be updated (#7441) Fixes #2439 Port used to ForceNew, it has now been changed to allow it to be updated. 2 changes were needed: 1. Setting the port back to state 2. Adding the wait for state function to the Update func ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSDBInstance_portUpdate' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDBInstance_portUpdate -timeout 120m === RUN TestAccAWSDBInstance_portUpdate --- PASS: TestAccAWSDBInstance_portUpdate (699.84s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 699.861s ``` --- .../providers/aws/resource_aws_db_instance.go | 26 ++++++- .../aws/resource_aws_db_instance_test.go | 67 +++++++++++++++++++ 2 files changed, 92 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 4b6c3985e..8dc6f115b 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -160,7 +160,6 @@ func resourceAwsDbInstance() *schema.Resource { Type: schema.TypeInt, Optional: true, Computed: true, - ForceNew: true, }, "publicly_accessible": &schema.Schema{ @@ -654,6 +653,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { d.Set("publicly_accessible", v.PubliclyAccessible) d.Set("multi_az", v.MultiAZ) d.Set("kms_key_id", v.KmsKeyId) + d.Set("port", v.DbInstancePort) if v.DBSubnetGroup != nil { d.Set("db_subnet_group_name", v.DBSubnetGroup.DBSubnetGroupName) } @@ -923,6 +923,12 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error requestUpdate = true } + if d.HasChange("port") { + d.SetPartial("port") + req.DBPortNumber = aws.Int64(int64(d.Get("port").(int))) + requestUpdate = true + } + log.Printf("[DEBUG] Send DB Instance Modification request: %t", requestUpdate) if requestUpdate { log.Printf("[DEBUG] DB Instance Modification request: %s", req) @@ -930,6 +936,24 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error if err != nil { return fmt.Errorf("Error modifying DB Instance %s: %s", d.Id(), err) } + + log.Println("[INFO] Waiting for DB Instance to be available") + + stateConf := &resource.StateChangeConf{ + Pending: []string{"creating", "backing-up", "modifying", "resetting-master-credentials", + "maintenance", "renaming", "rebooting", "upgrading"}, + Target: []string{"available"}, + Refresh: resourceAwsDbInstanceStateRefreshFunc(d, meta), + Timeout: 80 * time.Minute, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, dbStateErr := stateConf.WaitForState() + if dbStateErr != nil { + return dbStateErr + } } // separate request to promote a database diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go index 5db77b14c..36bf6e508 100644 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ b/builtin/providers/aws/resource_aws_db_instance_test.go @@ -213,6 +213,37 @@ func TestAccAWSDBInstance_iops_update(t *testing.T) { }) } +func TestAccAWSDBInstance_portUpdate(t *testing.T) { + var v rds.DBInstance + + rName := acctest.RandString(5) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + 
Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccSnapshotInstanceConfig_mysqlPort(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), + resource.TestCheckResourceAttr( + "aws_db_instance.bar", "port", "3306"), + ), + }, + + resource.TestStep{ + Config: testAccSnapshotInstanceConfig_updateMysqlPort(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), + resource.TestCheckResourceAttr( + "aws_db_instance.bar", "port", "3305"), + ), + }, + }, + }) +} + func testAccCheckAWSDBInstanceDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).rdsconn @@ -701,3 +732,39 @@ resource "aws_db_instance" "bar" { iops = %d }`, rName, iops) } + +func testAccSnapshotInstanceConfig_mysqlPort(rName string) string { + return fmt.Sprintf(` +resource "aws_db_instance" "bar" { + identifier = "mydb-rds-%s" + engine = "mysql" + engine_version = "5.6.23" + instance_class = "db.t2.micro" + name = "mydb" + username = "foo" + password = "barbarbar" + parameter_group_name = "default.mysql5.6" + port = 3306 + allocated_storage = 10 + + apply_immediately = true +}`, rName) +} + +func testAccSnapshotInstanceConfig_updateMysqlPort(rName string) string { + return fmt.Sprintf(` +resource "aws_db_instance" "bar" { + identifier = "mydb-rds-%s" + engine = "mysql" + engine_version = "5.6.23" + instance_class = "db.t2.micro" + name = "mydb" + username = "foo" + password = "barbarbar" + parameter_group_name = "default.mysql5.6" + port = 3305 + allocated_storage = 10 + + apply_immediately = true +}`, rName) +} From 69f3fb803a25eed8f5782502e0115e85e9c90006 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 12 Jul 2016 09:07:58 +0100 Subject: [PATCH 0233/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e6eb38250..eb809aeb2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -103,6 +103,7 @@ IMPROVEMENTS: * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` [GH-7181] * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint [GH-7511] * provider/aws: Retry creation of IAM role depending on new IAM user [GH-7324] + * provider/aws: Allow `port` on `aws_db_instance` to be updated [GH-7441] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 8b80d05103b2a19d70de31165347a985db0b8456 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Tue, 12 Jul 2016 02:13:52 -0600 Subject: [PATCH 0234/1238] provider/openstack: Support Import of OpenStack Block Storage Volumes (#7347) --- ...t_openstack_blockstorage_volume_v1_test.go | 29 ++++++++++++++ ...t_openstack_blockstorage_volume_v2_test.go | 29 ++++++++++++++ ...source_openstack_blockstorage_volume_v1.go | 21 +++++----- ...e_openstack_blockstorage_volume_v1_test.go | 8 +--- ...source_openstack_blockstorage_volume_v2.go | 21 +++++----- ...e_openstack_blockstorage_volume_v2_test.go | 40 +++++++++---------- 6 files changed, 102 insertions(+), 46 deletions(-) create mode 100644 builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go create mode 100644 
builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go b/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go new file mode 100644 index 000000000..b5539f2cb --- /dev/null +++ b/builtin/providers/openstack/import_openstack_blockstorage_volume_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackBlockStorageV1Volume_importBasic(t *testing.T) { + resourceName := "openstack_blockstorage_volume_v1.volume_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBlockStorageV1Volume_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go b/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go new file mode 100644 index 000000000..37489a820 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_blockstorage_volume_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackBlockStorageV2Volume_importBasic(t *testing.T) { + resourceName := "openstack_blockstorage_volume_v2.volume_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBlockStorageV2VolumeDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBlockStorageV2Volume_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go index 05328fa18..fca73f216 100644 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go @@ -20,6 +20,9 @@ func resourceBlockStorageVolumeV1() *schema.Resource { Read: resourceBlockStorageVolumeV1Read, Update: resourceBlockStorageVolumeV1Update, Delete: resourceBlockStorageVolumeV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -177,17 +180,15 @@ func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) d.Set("volume_type", v.VolumeType) d.Set("metadata", v.Metadata) - if len(v.Attachments) > 0 { - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment["id"] - attachments[i]["instance_id"] = attachment["server_id"] - attachments[i]["device"] = attachment["device"] - log.Printf("[DEBUG] attachment: %v", attachment) - } - d.Set("attachment", attachments) + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + 
attachments[i]["id"] = attachment["id"] + attachments[i]["instance_id"] = attachment["server_id"] + attachments[i]["device"] = attachment["device"] + log.Printf("[DEBUG] attachment: %v", attachment) } + d.Set("attachment", attachments) return nil } diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go index 3900e2331..cae663c10 100644 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go @@ -156,24 +156,20 @@ func testAccCheckBlockStorageV1VolumeMetadata( var testAccBlockStorageV1Volume_basic = fmt.Sprintf(` resource "openstack_blockstorage_volume_v1" "volume_1" { - region = "%s" name = "tf-test-volume" description = "first test volume" metadata{ foo = "bar" } size = 1 - }`, - OS_REGION_NAME) + }`) var testAccBlockStorageV1Volume_update = fmt.Sprintf(` resource "openstack_blockstorage_volume_v1" "volume_1" { - region = "%s" name = "tf-test-volume-updated" description = "first test volume" metadata{ foo = "bar" } size = 1 - }`, - OS_REGION_NAME) + }`) diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go index 75dcc0a4d..d2d2a559d 100644 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2.go @@ -20,6 +20,9 @@ func resourceBlockStorageVolumeV2() *schema.Resource { Read: resourceBlockStorageVolumeV2Read, Update: resourceBlockStorageVolumeV2Update, Delete: resourceBlockStorageVolumeV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -189,17 +192,15 @@ func resourceBlockStorageVolumeV2Read(d *schema.ResourceData, meta interface{}) d.Set("volume_type", v.VolumeType) d.Set("metadata", v.Metadata) - if len(v.Attachments) > 0 { - attachments := make([]map[string]interface{}, len(v.Attachments)) - for i, attachment := range v.Attachments { - attachments[i] = make(map[string]interface{}) - attachments[i]["id"] = attachment["id"] - attachments[i]["instance_id"] = attachment["server_id"] - attachments[i]["device"] = attachment["device"] - log.Printf("[DEBUG] attachment: %v", attachment) - } - d.Set("attachment", attachments) + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment["id"] + attachments[i]["instance_id"] = attachment["server_id"] + attachments[i]["device"] = attachment["device"] + log.Printf("[DEBUG] attachment: %v", attachment) } + d.Set("attachment", attachments) return nil } diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go index 7600a6527..914ff4688 100644 --- a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v2_test.go @@ -15,26 +15,6 @@ import ( func TestAccBlockStorageV2Volume_basic(t *testing.T) { var volume volumes.Volume - var testAccBlockStorageV2Volume_basic = fmt.Sprintf(` - resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1" - description = "first test 
volume" - metadata { - foo = "bar" - } - size = 1 - }`) - - var testAccBlockStorageV2Volume_update = fmt.Sprintf(` - resource "openstack_blockstorage_volume_v2" "volume_1" { - name = "volume_1-updated" - description = "first test volume" - metadata { - foo = "bar" - } - size = 1 - }`) - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -186,3 +166,23 @@ func testAccCheckBlockStorageV2VolumeMetadata( return fmt.Errorf("Metadata not found: %s", k) } } + +var testAccBlockStorageV2Volume_basic = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v2" "volume_1" { + name = "volume_1" + description = "first test volume" + metadata { + foo = "bar" + } + size = 1 + }`) + +var testAccBlockStorageV2Volume_update = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v2" "volume_1" { + name = "volume_1-updated" + description = "first test volume" + metadata { + foo = "bar" + } + size = 1 + }`) From 32abd937f15983d69017a2df93e0862ae73854df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20H=C3=A4ger?= Date: Tue, 12 Jul 2016 13:55:58 +0200 Subject: [PATCH 0235/1238] SimpleDB domain resource (#7600) --- builtin/providers/aws/config.go | 5 + builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_simpledb_domain.go | 81 + .../aws/resource_aws_simpledb_domain_test.go | 80 + .../aws/aws-sdk-go/private/signer/v2/v2.go | 180 ++ .../aws/aws-sdk-go/service/simpledb/api.go | 1528 +++++++++++++++++ .../service/simpledb/customizations.go | 11 + .../aws-sdk-go/service/simpledb/service.go | 102 ++ .../service/simpledb/unmarshall_error.go | 53 + vendor/vendor.json | 12 + .../aws/r/simpledb_domain.html.markdown | 31 + website/source/layouts/aws.erb | 12 + 12 files changed, 2096 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_simpledb_domain.go create mode 100644 builtin/providers/aws/resource_aws_simpledb_domain_test.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go create mode 100644 website/source/docs/providers/aws/r/simpledb_domain.html.markdown diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 1373d7c40..ab50b8ae2 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -51,6 +51,7 @@ import ( "github.com/aws/aws-sdk-go/service/route53" "github.com/aws/aws-sdk-go/service/s3" "github.com/aws/aws-sdk-go/service/ses" + "github.com/aws/aws-sdk-go/service/simpledb" "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sts" @@ -96,6 +97,7 @@ type AWSClient struct { autoscalingconn *autoscaling.AutoScaling s3conn *s3.S3 sesConn *ses.SES + simpledbconn *simpledb.SimpleDB sqsconn *sqs.SQS snsconn *sns.SNS stsconn *sts.STS @@ -219,6 +221,9 @@ func (c *Config) Client() (interface{}, error) { log.Println("[INFO] Initializing SES connection") client.sesConn = ses.New(sess) + log.Println("[INFO] Initializing SimpleDB connection") + client.simpledbconn = simpledb.New(sess) + log.Println("[INFO] Initializing SQS connection") client.sqsconn = sqs.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 
48d7c4077..6ae9d5320 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -260,6 +260,7 @@ func Provider() terraform.ResourceProvider { "aws_s3_bucket_notification": resourceAwsS3BucketNotification(), "aws_security_group": resourceAwsSecurityGroup(), "aws_security_group_rule": resourceAwsSecurityGroupRule(), + "aws_simpledb_domain": resourceAwsSimpleDBDomain(), "aws_spot_instance_request": resourceAwsSpotInstanceRequest(), "aws_spot_fleet_request": resourceAwsSpotFleetRequest(), "aws_sqs_queue": resourceAwsSqsQueue(), diff --git a/builtin/providers/aws/resource_aws_simpledb_domain.go b/builtin/providers/aws/resource_aws_simpledb_domain.go new file mode 100644 index 000000000..645908c9b --- /dev/null +++ b/builtin/providers/aws/resource_aws_simpledb_domain.go @@ -0,0 +1,81 @@ +package aws + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/simpledb" +) + +func resourceAwsSimpleDBDomain() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsSimpleDBDomainCreate, + Read: resourceAwsSimpleDBDomainRead, + Delete: resourceAwsSimpleDBDomainDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsSimpleDBDomainCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + name := d.Get("name").(string) + input := &simpledb.CreateDomainInput{ + DomainName: aws.String(name), + } + _, err := conn.CreateDomain(input) + if err != nil { + return fmt.Errorf("Create SimpleDB Domain failed: %s", err) + } + + d.SetId(name) + return nil +} + +func resourceAwsSimpleDBDomainRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + input := &simpledb.DomainMetadataInput{ + DomainName: aws.String(d.Id()), + } + _, err := conn.DomainMetadata(input) + if awsErr, ok := err.(awserr.Error); ok { + if awsErr.Code() == "NoSuchDomain" { + log.Printf("[WARN] Removing SimpleDB domain %q because it's gone.", d.Id()) + d.SetId("") + return nil + } + } + if err != nil { + return err + } + + d.Set("name", d.Id()) + return nil +} + +func resourceAwsSimpleDBDomainDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).simpledbconn + + input := &simpledb.DeleteDomainInput{ + DomainName: aws.String(d.Id()), + } + _, err := conn.DeleteDomain(input) + if err != nil { + return fmt.Errorf("Delete SimpleDB Domain failed: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/aws/resource_aws_simpledb_domain_test.go b/builtin/providers/aws/resource_aws_simpledb_domain_test.go new file mode 100644 index 000000000..0e4ee2e8e --- /dev/null +++ b/builtin/providers/aws/resource_aws_simpledb_domain_test.go @@ -0,0 +1,80 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/simpledb" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSSimpleDBDomain_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSimpleDBDomainDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSimpleDBDomainConfig, + Check: 
resource.ComposeTestCheckFunc( + testAccCheckAWSSimpleDBDomainExists("aws_simpledb_domain.test_domain"), + ), + }, + }, + }) +} + +func testAccCheckAWSSimpleDBDomainDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).simpledbconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_simpledb_domain" { + continue + } + + input := &simpledb.DomainMetadataInput{ + DomainName: aws.String(rs.Primary.ID), + } + _, err := conn.DomainMetadata(input) + if err == nil { + return fmt.Errorf("Domain exists when it should be destroyed!") + } + + // Verify the error is an API error, not something else + _, ok := err.(awserr.Error) + if !ok { + return err + } + } + + return nil +} + +func testAccCheckAWSSimpleDBDomainExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No SimpleDB domain with that name exists") + } + + conn := testAccProvider.Meta().(*AWSClient).simpledbconn + input := &simpledb.DomainMetadataInput{ + DomainName: aws.String(rs.Primary.ID), + } + _, err := conn.DomainMetadata(input) + return err + } +} + +var testAccAWSSimpleDBDomainConfig = ` +resource "aws_simpledb_domain" "test_domain" { + name = "terraform-test-domain" +} +` diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go new file mode 100644 index 000000000..88c3a2f61 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v2/v2.go @@ -0,0 +1,180 @@ +package v2 + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/base64" + "errors" + "fmt" + "net/http" + "net/url" + "sort" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +var ( + errInvalidMethod = errors.New("v2 signer only handles HTTP POST") +) + +const ( + signatureVersion = "2" + signatureMethod = "HmacSHA256" + timeFormat = "2006-01-02T15:04:05Z" +) + +type signer struct { + // Values that must be populated from the request + Request *http.Request + Time time.Time + Credentials *credentials.Credentials + Debug aws.LogLevelType + Logger aws.Logger + + Query url.Values + stringToSign string + signature string +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v2.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest requests with signature version 2. +// +// Will sign the requests with the service config's Credentials object +// Signing is skipped if the credentials is the credentials.AnonymousCredentials +// object. +func SignSDKRequest(req *request.Request) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + if req.HTTPRequest.Method != "POST" && req.HTTPRequest.Method != "GET" { + // The V2 signer only supports GET and POST + req.Error = errInvalidMethod + return + } + + v2 := signer{ + Request: req.HTTPRequest, + Time: req.Time, + Credentials: req.Config.Credentials, + Debug: req.Config.LogLevel.Value(), + Logger: req.Config.Logger, + } + + req.Error = v2.Sign() + + if req.Error != nil { + return + } + + if req.HTTPRequest.Method == "POST" { + // Set the body of the request based on the modified query parameters + req.SetStringBody(v2.Query.Encode()) + + // Now that the body has changed, remove any Content-Length header, + // because it will be incorrect + req.HTTPRequest.ContentLength = 0 + req.HTTPRequest.Header.Del("Content-Length") + } else { + req.HTTPRequest.URL.RawQuery = v2.Query.Encode() + } +} + +func (v2 *signer) Sign() error { + credValue, err := v2.Credentials.Get() + if err != nil { + return err + } + + if v2.Request.Method == "POST" { + // Parse the HTTP request to obtain the query parameters that will + // be used to build the string to sign. Note that because the HTTP + // request will need to be modified, the PostForm and Form properties + // are reset to nil after parsing. + v2.Request.ParseForm() + v2.Query = v2.Request.PostForm + v2.Request.PostForm = nil + v2.Request.Form = nil + } else { + v2.Query = v2.Request.URL.Query() + } + + // Set new query parameters + v2.Query.Set("AWSAccessKeyId", credValue.AccessKeyID) + v2.Query.Set("SignatureVersion", signatureVersion) + v2.Query.Set("SignatureMethod", signatureMethod) + v2.Query.Set("Timestamp", v2.Time.UTC().Format(timeFormat)) + if credValue.SessionToken != "" { + v2.Query.Set("SecurityToken", credValue.SessionToken) + } + + // in case this is a retry, ensure no signature present + v2.Query.Del("Signature") + + method := v2.Request.Method + host := v2.Request.URL.Host + path := v2.Request.URL.Path + if path == "" { + path = "/" + } + + // obtain all of the query keys and sort them + queryKeys := make([]string, 0, len(v2.Query)) + for key := range v2.Query { + queryKeys = append(queryKeys, key) + } + sort.Strings(queryKeys) + + // build URL-encoded query keys and values + queryKeysAndValues := make([]string, len(queryKeys)) + for i, key := range queryKeys { + k := strings.Replace(url.QueryEscape(key), "+", "%20", -1) + v := strings.Replace(url.QueryEscape(v2.Query.Get(key)), "+", "%20", -1) + queryKeysAndValues[i] = k + "=" + v + } + + // join into one query string + query := strings.Join(queryKeysAndValues, "&") + + // build the canonical string for the V2 signature + v2.stringToSign = strings.Join([]string{ + method, + host, + path, + query, + }, "\n") + + hash := hmac.New(sha256.New, []byte(credValue.SecretAccessKey)) + hash.Write([]byte(v2.stringToSign)) + v2.signature = base64.StdEncoding.EncodeToString(hash.Sum(nil)) + v2.Query.Set("Signature", v2.signature) + + if v2.Debug.Matches(aws.LogDebugWithSigning) { + v2.logSigningInfo() + } + + return nil +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ STRING TO SIGN ]-------------------------------- +%s +---[ SIGNATURE ]------------------------------------- +%s +-----------------------------------------------------` + +func (v2 *signer) logSigningInfo() { + msg := fmt.Sprintf(logSignInfoMsg, v2.stringToSign, v2.Query.Get("Signature")) + v2.Logger.Log(msg) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go 
b/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go new file mode 100644 index 000000000..c4d0e5cf8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/api.go @@ -0,0 +1,1528 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +// Package simpledb provides a client for Amazon SimpleDB. +package simpledb + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/query" +) + +const opBatchDeleteAttributes = "BatchDeleteAttributes" + +// BatchDeleteAttributesRequest generates a "aws/request.Request" representing the +// client's request for the BatchDeleteAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchDeleteAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchDeleteAttributesRequest method. +// req, resp := client.BatchDeleteAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) BatchDeleteAttributesRequest(input *BatchDeleteAttributesInput) (req *request.Request, output *BatchDeleteAttributesOutput) { + op := &request.Operation{ + Name: opBatchDeleteAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchDeleteAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &BatchDeleteAttributesOutput{} + req.Data = output + return +} + +// Performs multiple DeleteAttributes operations in a single call, which reduces +// round trips and latencies. This enables Amazon SimpleDB to optimize requests, +// which generally yields better throughput. +// +// If you specify BatchDeleteAttributes without attributes or values, all +// the attributes for the item are deleted. +// +// BatchDeleteAttributes is an idempotent operation; running it multiple times +// on the same item or attribute doesn't result in an error. +// +// The BatchDeleteAttributes operation succeeds or fails in its entirety. +// There are no partial deletes. You can execute multiple BatchDeleteAttributes +// operations and other operations in parallel. However, large numbers of concurrent +// BatchDeleteAttributes calls can result in Service Unavailable (503) responses. +// +// This operation is vulnerable to exceeding the maximum URL size when making +// a REST request using the HTTP GET method. +// +// This operation does not support conditions using Expected.X.Name, Expected.X.Value, +// or Expected.X.Exists. 
+// +// The following limitations are enforced for this operation: 1 MB request +// size 25 item limit per BatchDeleteAttributes operation +func (c *SimpleDB) BatchDeleteAttributes(input *BatchDeleteAttributesInput) (*BatchDeleteAttributesOutput, error) { + req, out := c.BatchDeleteAttributesRequest(input) + err := req.Send() + return out, err +} + +const opBatchPutAttributes = "BatchPutAttributes" + +// BatchPutAttributesRequest generates a "aws/request.Request" representing the +// client's request for the BatchPutAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchPutAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchPutAttributesRequest method. +// req, resp := client.BatchPutAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) BatchPutAttributesRequest(input *BatchPutAttributesInput) (req *request.Request, output *BatchPutAttributesOutput) { + op := &request.Operation{ + Name: opBatchPutAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchPutAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &BatchPutAttributesOutput{} + req.Data = output + return +} + +// The BatchPutAttributes operation creates or replaces attributes within one +// or more items. By using this operation, the client can perform multiple PutAttribute +// operation with a single call. This helps yield savings in round trips and +// latencies, enabling Amazon SimpleDB to optimize requests and generally produce +// better throughput. +// +// The client may specify the item name with the Item.X.ItemName parameter. +// The client may specify new attributes using a combination of the Item.X.Attribute.Y.Name +// and Item.X.Attribute.Y.Value parameters. The client may specify the first +// attribute for the first item using the parameters Item.0.Attribute.0.Name +// and Item.0.Attribute.0.Value, and for the second attribute for the first +// item by the parameters Item.0.Attribute.1.Name and Item.0.Attribute.1.Value, +// and so on. +// +// Attributes are uniquely identified within an item by their name/value combination. +// For example, a single item can have the attributes { "first_name", "first_value" +// } and { "first_name", "second_value" }. However, it cannot have two attribute +// instances where both the Item.X.Attribute.Y.Name and Item.X.Attribute.Y.Value +// are the same. +// +// Optionally, the requester can supply the Replace parameter for each individual +// value. Setting this value to true will cause the new attribute values to +// replace the existing attribute values. 
For example, if an item I has the +// attributes { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requester does +// a BatchPutAttributes of {'I', 'b', '4' } with the Replace parameter set to +// true, the final attributes of the item will be { 'a', '1' } and { 'b', '4' +// }, replacing the previous values of the 'b' attribute with the new value. +// +// You cannot specify an empty string as an item or as an attribute name. +// The BatchPutAttributes operation succeeds or fails in its entirety. There +// are no partial puts. This operation is vulnerable to exceeding the maximum +// URL size when making a REST request using the HTTP GET method. This operation +// does not support conditions using Expected.X.Name, Expected.X.Value, or Expected.X.Exists. +// You can execute multiple BatchPutAttributes operations and other operations +// in parallel. However, large numbers of concurrent BatchPutAttributes calls +// can result in Service Unavailable (503) responses. +// +// The following limitations are enforced for this operation: 256 attribute +// name-value pairs per item 1 MB request size 1 billion attributes per domain +// 10 GB of total user data storage per domain 25 item limit per BatchPutAttributes +// operation +func (c *SimpleDB) BatchPutAttributes(input *BatchPutAttributesInput) (*BatchPutAttributesOutput, error) { + req, out := c.BatchPutAttributesRequest(input) + err := req.Send() + return out, err +} + +const opCreateDomain = "CreateDomain" + +// CreateDomainRequest generates a "aws/request.Request" representing the +// client's request for the CreateDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDomainRequest method. +// req, resp := client.CreateDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) CreateDomainRequest(input *CreateDomainInput) (req *request.Request, output *CreateDomainOutput) { + op := &request.Operation{ + Name: opCreateDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &CreateDomainOutput{} + req.Data = output + return +} + +// The CreateDomain operation creates a new domain. The domain name should be +// unique among the domains associated with the Access Key ID provided in the +// request. The CreateDomain operation may take 10 or more seconds to complete. +// +// CreateDomain is an idempotent operation; running it multiple times using +// the same domain name will not result in an error response. The client can +// create up to 100 domains per account. +// +// If the client requires additional domains, go to http://aws.amazon.com/contact-us/simpledb-limit-request/ +// (http://aws.amazon.com/contact-us/simpledb-limit-request/). 
+func (c *SimpleDB) CreateDomain(input *CreateDomainInput) (*CreateDomainOutput, error) { + req, out := c.CreateDomainRequest(input) + err := req.Send() + return out, err +} + +const opDeleteAttributes = "DeleteAttributes" + +// DeleteAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAttributesRequest method. +// req, resp := client.DeleteAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) DeleteAttributesRequest(input *DeleteAttributesInput) (req *request.Request, output *DeleteAttributesOutput) { + op := &request.Operation{ + Name: opDeleteAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteAttributesOutput{} + req.Data = output + return +} + +// Deletes one or more attributes associated with an item. If all attributes +// of the item are deleted, the item is deleted. +// +// If DeleteAttributes is called without being passed any attributes or values +// specified, all the attributes for the item are deleted. DeleteAttributes +// is an idempotent operation; running it multiple times on the same item or +// attribute does not result in an error response. +// +// Because Amazon SimpleDB makes multiple copies of item data and uses an +// eventual consistency update model, performing a GetAttributes or Select operation +// (read) immediately after a DeleteAttributes or PutAttributes operation (write) +// might not return updated item data. +func (c *SimpleDB) DeleteAttributes(input *DeleteAttributesInput) (*DeleteAttributesOutput, error) { + req, out := c.DeleteAttributesRequest(input) + err := req.Send() + return out, err +} + +const opDeleteDomain = "DeleteDomain" + +// DeleteDomainRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDomainRequest method. 
+// req, resp := client.DeleteDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) DeleteDomainRequest(input *DeleteDomainInput) (req *request.Request, output *DeleteDomainOutput) { + op := &request.Operation{ + Name: opDeleteDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteDomainInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteDomainOutput{} + req.Data = output + return +} + +// The DeleteDomain operation deletes a domain. Any items (and their attributes) +// in the domain are deleted as well. The DeleteDomain operation might take +// 10 or more seconds to complete. +// +// Running DeleteDomain on a domain that does not exist or running the function +// multiple times using the same domain name will not result in an error response. +func (c *SimpleDB) DeleteDomain(input *DeleteDomainInput) (*DeleteDomainOutput, error) { + req, out := c.DeleteDomainRequest(input) + err := req.Send() + return out, err +} + +const opDomainMetadata = "DomainMetadata" + +// DomainMetadataRequest generates a "aws/request.Request" representing the +// client's request for the DomainMetadata operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DomainMetadata method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DomainMetadataRequest method. +// req, resp := client.DomainMetadataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) DomainMetadataRequest(input *DomainMetadataInput) (req *request.Request, output *DomainMetadataOutput) { + op := &request.Operation{ + Name: opDomainMetadata, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DomainMetadataInput{} + } + + req = c.newRequest(op, input, output) + output = &DomainMetadataOutput{} + req.Data = output + return +} + +// Returns information about the domain, including when the domain was created, +// the number of items and attributes in the domain, and the size of the attribute +// names and values. +func (c *SimpleDB) DomainMetadata(input *DomainMetadataInput) (*DomainMetadataOutput, error) { + req, out := c.DomainMetadataRequest(input) + err := req.Send() + return out, err +} + +const opGetAttributes = "GetAttributes" + +// GetAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAttributesRequest method. +// req, resp := client.GetAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) GetAttributesRequest(input *GetAttributesInput) (req *request.Request, output *GetAttributesOutput) { + op := &request.Operation{ + Name: opGetAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetAttributesOutput{} + req.Data = output + return +} + +// Returns all of the attributes associated with the specified item. Optionally, +// the attributes returned can be limited to one or more attributes by specifying +// an attribute name parameter. +// +// If the item does not exist on the replica that was accessed for this operation, +// an empty set is returned. The system does not return an error as it cannot +// guarantee the item does not exist on other replicas. +// +// If GetAttributes is called without being passed any attribute names, all +// the attributes for the item are returned. +func (c *SimpleDB) GetAttributes(input *GetAttributesInput) (*GetAttributesOutput, error) { + req, out := c.GetAttributesRequest(input) + err := req.Send() + return out, err +} + +const opListDomains = "ListDomains" + +// ListDomainsRequest generates a "aws/request.Request" representing the +// client's request for the ListDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainsRequest method. +// req, resp := client.ListDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) ListDomainsRequest(input *ListDomainsInput) (req *request.Request, output *ListDomainsOutput) { + op := &request.Operation{ + Name: opListDomains, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxNumberOfDomains", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListDomainsInput{} + } + + req = c.newRequest(op, input, output) + output = &ListDomainsOutput{} + req.Data = output + return +} + +// The ListDomains operation lists all domains associated with the Access Key +// ID. It returns domain names up to the limit set by MaxNumberOfDomains (#MaxNumberOfDomains). +// A NextToken (#NextToken) is returned if there are more than MaxNumberOfDomains +// domains. Calling ListDomains successive times with the NextToken provided +// by the operation returns up to MaxNumberOfDomains more domain names with +// each successive operation call. 
+func (c *SimpleDB) ListDomains(input *ListDomainsInput) (*ListDomainsOutput, error) { + req, out := c.ListDomainsRequest(input) + err := req.Send() + return out, err +} + +// ListDomainsPages iterates over the pages of a ListDomains operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDomains method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDomains operation. +// pageNum := 0 +// err := client.ListDomainsPages(params, +// func(page *ListDomainsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SimpleDB) ListDomainsPages(input *ListDomainsInput, fn func(p *ListDomainsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListDomainsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListDomainsOutput), lastPage) + }) +} + +const opPutAttributes = "PutAttributes" + +// PutAttributesRequest generates a "aws/request.Request" representing the +// client's request for the PutAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutAttributesRequest method. +// req, resp := client.PutAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) PutAttributesRequest(input *PutAttributesInput) (req *request.Request, output *PutAttributesOutput) { + op := &request.Operation{ + Name: opPutAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutAttributesInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &PutAttributesOutput{} + req.Data = output + return +} + +// The PutAttributes operation creates or replaces attributes in an item. The +// client may specify new attributes using a combination of the Attribute.X.Name +// and Attribute.X.Value parameters. The client specifies the first attribute +// by the parameters Attribute.0.Name and Attribute.0.Value, the second attribute +// by the parameters Attribute.1.Name and Attribute.1.Value, and so on. +// +// Attributes are uniquely identified in an item by their name/value combination. +// For example, a single item can have the attributes { "first_name", "first_value" +// } and { "first_name", second_value" }. However, it cannot have two attribute +// instances where both the Attribute.X.Name and Attribute.X.Value are the same. +// +// Optionally, the requestor can supply the Replace parameter for each individual +// attribute. 
Setting this value to true causes the new attribute value to replace +// the existing attribute value(s). For example, if an item has the attributes +// { 'a', '1' }, { 'b', '2'} and { 'b', '3' } and the requestor calls PutAttributes +// using the attributes { 'b', '4' } with the Replace parameter set to true, +// the final attributes of the item are changed to { 'a', '1' } and { 'b', '4' +// }, which replaces the previous values of the 'b' attribute with the new value. +// +// Using PutAttributes to replace attribute values that do not exist will +// not result in an error response. You cannot specify an empty string as +// an attribute name. +// +// Because Amazon SimpleDB makes multiple copies of client data and uses an +// eventual consistency update model, an immediate GetAttributes or Select operation +// (read) immediately after a PutAttributes or DeleteAttributes operation (write) +// might not return the updated data. +// +// The following limitations are enforced for this operation: 256 total attribute +// name-value pairs per item One billion attributes per domain 10 GB of total +// user data storage per domain +func (c *SimpleDB) PutAttributes(input *PutAttributesInput) (*PutAttributesOutput, error) { + req, out := c.PutAttributesRequest(input) + err := req.Send() + return out, err +} + +const opSelect = "Select" + +// SelectRequest generates a "aws/request.Request" representing the +// client's request for the Select operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Select method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SelectRequest method. +// req, resp := client.SelectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SimpleDB) SelectRequest(input *SelectInput) (req *request.Request, output *SelectOutput) { + op := &request.Operation{ + Name: opSelect, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "", + TruncationToken: "", + }, + } + + if input == nil { + input = &SelectInput{} + } + + req = c.newRequest(op, input, output) + output = &SelectOutput{} + req.Data = output + return +} + +// The Select operation returns a set of attributes for ItemNames that match +// the select expression. Select is similar to the standard SQL SELECT statement. +// +// The total size of the response cannot exceed 1 MB in total size. Amazon +// SimpleDB automatically adjusts the number of items returned per page to enforce +// this limit. For example, if the client asks to retrieve 2500 items, but each +// individual item is 10 kB in size, the system returns 100 items and an appropriate +// NextToken so the client can access the next page of results. +// +// For information on how to construct select expressions, see Using Select +// to Create Amazon SimpleDB Queries in the Developer Guide. 
+func (c *SimpleDB) Select(input *SelectInput) (*SelectOutput, error) { + req, out := c.SelectRequest(input) + err := req.Send() + return out, err +} + +// SelectPages iterates over the pages of a Select operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Select method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Select operation. +// pageNum := 0 +// err := client.SelectPages(params, +// func(page *SelectOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *SimpleDB) SelectPages(input *SelectInput, fn func(p *SelectOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.SelectRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*SelectOutput), lastPage) + }) +} + +type Attribute struct { + _ struct{} `type:"structure"` + + AlternateNameEncoding *string `type:"string"` + + AlternateValueEncoding *string `type:"string"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Attribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Attribute) GoString() string { + return s.String() +} + +type BatchDeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being deleted. + DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. + Items []*DeletableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchDeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchDeleteAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchDeleteAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchDeleteAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchDeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchDeleteAttributesOutput) GoString() string { + return s.String() +} + +type BatchPutAttributesInput struct { + _ struct{} `type:"structure"` + + // The name of the domain in which the attributes are being stored. 
+ DomainName *string `type:"string" required:"true"` + + // A list of items on which to perform the operation. + Items []*ReplaceableItem `locationNameList:"Item" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BatchPutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchPutAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchPutAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.Items == nil { + invalidParams.Add(request.NewErrParamRequired("Items")) + } + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type BatchPutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s BatchPutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchPutAttributesOutput) GoString() string { + return s.String() +} + +type CreateDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to create. The name can range between 3 and 255 characters + // and can contain the following characters: a-z, A-Z, 0-9, '_', '-', and '.'. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CreateDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDomainOutput) GoString() string { + return s.String() +} + +type DeletableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the attribute. + Name *string `type:"string" required:"true"` + + // The value of the attribute. + Value *string `type:"string"` +} + +// String returns the string representation +func (s DeletableAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeletableAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletableAttribute"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeletableItem struct { + _ struct{} `type:"structure"` + + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + Name *string `locationName:"ItemName" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletableItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletableItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletableItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeletableItem"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of Attributes. Similar to columns on a spreadsheet, attributes represent + // categories of data that can be assigned to items. + Attributes []*DeletableAttribute `locationNameList:"Attribute" type:"list" flattened:"true"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be deleted or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be deleted. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. Similar to rows on a spreadsheet, items represent individual + // objects that contain one or more value-attribute pairs. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.ItemName == nil { + invalidParams.Add(request.NewErrParamRequired("ItemName")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteAttributesOutput) GoString() string { + return s.String() +} + +type DeleteDomainInput struct { + _ struct{} `type:"structure"` + + // The name of the domain to delete. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteDomainInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteDomainOutput) GoString() string { + return s.String() +} + +type DomainMetadataInput struct { + _ struct{} `type:"structure"` + + // The name of the domain for which to display the metadata of. + DomainName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DomainMetadataInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainMetadataInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DomainMetadataInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DomainMetadataInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DomainMetadataOutput struct { + _ struct{} `type:"structure"` + + // The number of unique attribute names in the domain. + AttributeNameCount *int64 `type:"integer"` + + // The total size of all unique attribute names in the domain, in bytes. + AttributeNamesSizeBytes *int64 `type:"long"` + + // The number of all attribute name/value pairs in the domain. + AttributeValueCount *int64 `type:"integer"` + + // The total size of all attribute values in the domain, in bytes. + AttributeValuesSizeBytes *int64 `type:"long"` + + // The number of all items in the domain. + ItemCount *int64 `type:"integer"` + + // The total size of all item names in the domain, in bytes. 
+ ItemNamesSizeBytes *int64 `type:"long"` + + // The data and time when metadata was calculated, in Epoch (UNIX) seconds. + Timestamp *int64 `type:"integer"` +} + +// String returns the string representation +func (s DomainMetadataOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DomainMetadataOutput) GoString() string { + return s.String() +} + +type GetAttributesInput struct { + _ struct{} `type:"structure"` + + // The names of the attributes. + AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` + + // Determines whether or not strong consistency should be enforced when data + // is read from SimpleDB. If true, any data previously written to SimpleDB will + // be returned. Otherwise, results will be consistent eventually, and the client + // may not see data that was written immediately before your read. + ConsistentRead *bool `type:"boolean"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The name of the item. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetAttributesInput"} + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.ItemName == nil { + invalidParams.Add(request.NewErrParamRequired("ItemName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetAttributesOutput struct { + _ struct{} `type:"structure"` + + // The list of attributes returned by the operation. + Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetAttributesOutput) GoString() string { + return s.String() +} + +type Item struct { + _ struct{} `type:"structure"` + + AlternateNameEncoding *string `type:"string"` + + // A list of attributes. + Attributes []*Attribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the item. + Name *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Item) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Item) GoString() string { + return s.String() +} + +type ListDomainsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of domain names you want returned. The range is 1 to 100. + // The default setting is 100. + MaxNumberOfDomains *int64 `type:"integer"` + + // A string informing Amazon SimpleDB where to start the next list of domain + // names. 
+ NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsInput) GoString() string { + return s.String() +} + +type ListDomainsOutput struct { + _ struct{} `type:"structure"` + + // A list of domain names that match the expression. + DomainNames []*string `locationNameList:"DomainName" type:"list" flattened:"true"` + + // An opaque token indicating that there are more domains than the specified + // MaxNumberOfDomains still available. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListDomainsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListDomainsOutput) GoString() string { + return s.String() +} + +type PutAttributesInput struct { + _ struct{} `type:"structure"` + + // The list of attributes. + Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the domain in which to perform the operation. + DomainName *string `type:"string" required:"true"` + + // The update condition which, if specified, determines whether the specified + // attributes will be updated or not. The update condition must be satisfied + // in order for this request to be processed and the attributes to be updated. + Expected *UpdateCondition `type:"structure"` + + // The name of the item. + ItemName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.DomainName == nil { + invalidParams.Add(request.NewErrParamRequired("DomainName")) + } + if s.ItemName == nil { + invalidParams.Add(request.NewErrParamRequired("ItemName")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutAttributesOutput) GoString() string { + return s.String() +} + +type ReplaceableAttribute struct { + _ struct{} `type:"structure"` + + // The name of the replaceable attribute. + Name *string `type:"string" required:"true"` + + // A flag specifying whether or not to replace the attribute/value pair or to + // add a new attribute/value pair. The default setting is false. + Replace *bool `type:"boolean"` + + // The value of the replaceable attribute. 
+ Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceableAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceableAttribute) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceableAttribute) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceableAttribute"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ReplaceableItem struct { + _ struct{} `type:"structure"` + + // The list of attributes for a replaceable item. + Attributes []*ReplaceableAttribute `locationNameList:"Attribute" type:"list" flattened:"true" required:"true"` + + // The name of the replaceable item. + Name *string `locationName:"ItemName" type:"string" required:"true"` +} + +// String returns the string representation +func (s ReplaceableItem) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplaceableItem) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplaceableItem) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplaceableItem"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Attributes != nil { + for i, v := range s.Attributes { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Attributes", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SelectInput struct { + _ struct{} `type:"structure"` + + // Determines whether or not strong consistency should be enforced when data + // is read from SimpleDB. If true, any data previously written to SimpleDB will + // be returned. Otherwise, results will be consistent eventually, and the client + // may not see data that was written immediately before your read. + ConsistentRead *bool `type:"boolean"` + + // A string informing Amazon SimpleDB where to start the next list of ItemNames. + NextToken *string `type:"string"` + + // The expression used to query the domain. + SelectExpression *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SelectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SelectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SelectInput"} + if s.SelectExpression == nil { + invalidParams.Add(request.NewErrParamRequired("SelectExpression")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type SelectOutput struct { + _ struct{} `type:"structure"` + + // A list of items that match the select expression. 
+ Items []*Item `locationNameList:"Item" type:"list" flattened:"true"` + + // An opaque token indicating that more items than MaxNumberOfItems were matched, + // the response size exceeded 1 megabyte, or the execution time exceeded 5 seconds. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s SelectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SelectOutput) GoString() string { + return s.String() +} + +// Specifies the conditions under which data should be updated. If an update +// condition is specified for a request, the data will only be updated if the +// condition is satisfied. For example, if an attribute with a specific name +// and value exists, or if a specific attribute doesn't exist. +type UpdateCondition struct { + _ struct{} `type:"structure"` + + // A value specifying whether or not the specified attribute must exist with + // the specified value in order for the update condition to be satisfied. Specify + // true if the attribute must exist for the update condition to be satisfied. + // Specify false if the attribute should not exist in order for the update condition + // to be satisfied. + Exists *bool `type:"boolean"` + + // The name of the attribute involved in the condition. + Name *string `type:"string"` + + // The value of an attribute. This value can only be specified when the Exists + // parameter is equal to true. + Value *string `type:"string"` +} + +// String returns the string representation +func (s UpdateCondition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UpdateCondition) GoString() string { + return s.String() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go new file mode 100644 index 000000000..a0dcce54b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/customizations.go @@ -0,0 +1,11 @@ +package simpledb + +import "github.com/aws/aws-sdk-go/aws/client" + +func init() { + initClient = func(c *client.Client) { + // SimpleDB uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go new file mode 100644 index 000000000..196047620 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/service.go @@ -0,0 +1,102 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package simpledb + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/signer/v2" +) + +// Amazon SimpleDB is a web service providing the core database functions of +// data indexing and querying in the cloud. By offloading the time and effort +// associated with building and operating a web-scale database, SimpleDB provides +// developers the freedom to focus on application development. A traditional, +// clustered relational database requires a sizable upfront capital outlay, +// is complex to design, and often requires extensive and repetitive database +// administration. 
Amazon SimpleDB is dramatically simpler, requiring no schema, +// automatically indexing your data and providing a simple API for storage and +// access. This approach eliminates the administrative burden of data modeling, +// index maintenance, and performance tuning. Developers gain access to this +// functionality within Amazon's proven computing environment, are able to scale +// instantly, and pay only for what they use. +// +// Visit http://aws.amazon.com/simpledb/ (http://aws.amazon.com/simpledb/) +// for more information. +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. +type SimpleDB struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "sdb" + +// New creates a new instance of the SimpleDB client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a SimpleDB client from just a session. +// svc := simpledb.New(mySession) +// +// // Create a SimpleDB client with additional configuration +// svc := simpledb.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *SimpleDB { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *SimpleDB { + svc := &SimpleDB{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2009-04-15", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v2.SignRequestHandler) + svc.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + svc.Handlers.Build.PushBackNamed(query.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a SimpleDB operation and runs any +// custom request initialization. 
+func (c *SimpleDB) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go new file mode 100644 index 000000000..acc8a86eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/simpledb/unmarshall_error.go @@ -0,0 +1,53 @@ +package simpledb + +import ( + "encoding/xml" + "io" + "io/ioutil" + "strings" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorDetail struct { + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Response"` + Errors []xmlErrorDetail `xml:"Errors>Error"` + RequestID string `xml:"RequestID"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + if r.HTTPResponse.ContentLength == int64(0) { + // No body, use status code to generate an awserr.Error + r.Error = awserr.NewRequestFailure( + awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), + r.HTTPResponse.StatusCode, + "", + ) + return + } + + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + r.Error = awserr.New("SerializationError", "failed to decode SimpleDB XML error response", nil) + } else if len(resp.Errors) == 0 { + r.Error = awserr.New("MissingError", "missing error code in SimpleDB XML error response", nil) + } else { + // If there are multiple error codes, return only the first as the aws.Error interface only supports + // one error code. + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Errors[0].Code, resp.Errors[0].Message, nil), + r.HTTPResponse.StatusCode, + resp.RequestID, + ) + } +} diff --git a/vendor/vendor.json b/vendor/vendor.json index a4347ea1b..cf678605f 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -389,6 +389,12 @@ "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" }, + { + "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", + "path": "github.com/aws/aws-sdk-go/private/signer/v2", + "revision": "333fcdc9874ea63fbdb3176e12ffa04b5ec44f5a", + "revisionTime": "2016-07-05T22:03:21Z" + }, { "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/signer/v4", @@ -565,6 +571,12 @@ "path": "github.com/aws/aws-sdk-go/service/s3", "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" }, + { + "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", + "path": "github.com/aws/aws-sdk-go/service/simpledb", + "revision": "333fcdc9874ea63fbdb3176e12ffa04b5ec44f5a", + "revisionTime": "2016-07-05T22:03:21Z" + }, { "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sns", diff --git a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown new file mode 100644 index 000000000..52c324652 --- /dev/null +++ b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown @@ -0,0 +1,31 @@ +--- +layout: "aws" +page_title: "AWS: simpledb_domain" +sidebar_current: "docs-aws-resource-simpledb-domain" +description: |- + Provides a SimpleDB domain resource. 
+--- + +# aws\_simpledb\_domain + +Provides a SimpleDB domain resource + +## Example Usage + +``` +resource "aws_simpledb_domain" "users" { + name = "users" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the SimpleDB domain + +## Attributes Reference + +The following attributes are exported: + +* `id` - The name of the SimpleDB domain diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 75dd6410d..b3873ef95 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -728,6 +728,18 @@ + > + SimpleDB Resources + + + + > SNS Resources From e13e4977f09897c5fabe6d27ad9b142807052a3c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 13 Jul 2016 15:38:06 +0100 Subject: [PATCH 0259/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 720fae2ea..1311ffebf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -68,6 +68,7 @@ FEATURES: * **New Resource:** `azurerm_virtual_machine_scale_set` [GH-6711] * **New Resource:** `datadog_timeboard` [GH-6900] * **New Resource:** `digitalocean_tag` [GH-7500] + * **New Resource:** `digitalocean_volume` [GH-7560] * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] * core: The `lookup` interpolation function can now have a default fall-back value specified [GH-6884] * core: The `terraform plan` command no longer persists state. [GH-6811] From ef3aad123182863ac3f92703f05dc3b005336134 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 12 Jul 2016 10:35:44 -0600 Subject: [PATCH 0260/1238] core: Correctly format nested outputs This commit pretty prints outputs which consist of nested complex structures (e.g. lists of lists, lists of maps). Fixes #7143. 
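For illustration only, here is a minimal, self-contained sketch of the recursive formatting idea. The real change below adds `formatNestedList` and `formatNestedMap` to command/output.go; the function name, exact indentation, and scalar handling in this sketch are simplified assumptions, not the actual implementation:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// formatValue renders nested lists and maps with two-space indentation per
// level; anything that is not a list or map falls through to plain %v.
// Illustrative only: this is not the helper added by the patch.
func formatValue(indent string, v interface{}) string {
	switch tv := v.(type) {
	case []interface{}:
		buf := new(bytes.Buffer)
		buf.WriteString("[\n")
		for i, item := range tv {
			buf.WriteString(indent + "  " + formatValue(indent+"  ", item))
			if i != len(tv)-1 {
				buf.WriteString(",")
			}
			buf.WriteString("\n")
		}
		buf.WriteString(indent + "]")
		return buf.String()
	case map[string]interface{}:
		keys := make([]string, 0, len(tv))
		for k := range tv {
			keys = append(keys, k)
		}
		sort.Strings(keys)
		buf := new(bytes.Buffer)
		buf.WriteString("{\n")
		for i, k := range keys {
			buf.WriteString(fmt.Sprintf("%s  %s = %s", indent, k, formatValue(indent+"  ", tv[k])))
			if i != len(keys)-1 {
				buf.WriteString(",")
			}
			buf.WriteString("\n")
		}
		buf.WriteString(indent + "}")
		return buf.String()
	default:
		return fmt.Sprintf("%v", tv)
	}
}

func main() {
	// A list of maps, similar to the value of an output fed from a splat.
	v := []interface{}{
		map[string]interface{}{"key": "value", "key2": "value2"},
		map[string]interface{}{"key": "value"},
	}
	fmt.Println("foo = " + formatValue("", v))
}
```

Compared to Go's default `%v` rendering of nested maps and slices, the recursive approach keeps `terraform output` readable for lists of lists and lists of maps.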
--- command/output.go | 82 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 5 deletions(-) diff --git a/command/output.go b/command/output.go index 81ffd16e2..23347a4dd 100644 --- a/command/output.go +++ b/command/output.go @@ -122,7 +122,16 @@ func (c *OutputCommand) Run(args []string) int { break } - c.Ui.Output(fmt.Sprintf("%s", output[indexInt])) + outputVal := output[indexInt] + switch typedOutputVal := outputVal.(type) { + case string: + c.Ui.Output(fmt.Sprintf("%s", typedOutputVal)) + case []interface{}: + c.Ui.Output(fmt.Sprintf("%s", formatNestedList("", typedOutputVal))) + case map[string]interface{}: + c.Ui.Output(fmt.Sprintf("%s", formatNestedMap("", typedOutputVal))) + } + return 0 case map[string]interface{}: if index == "" { @@ -131,7 +140,14 @@ func (c *OutputCommand) Run(args []string) int { } if value, ok := output[index]; ok { - c.Ui.Output(fmt.Sprintf("%s", value)) + switch typedOutputVal := value.(type) { + case string: + c.Ui.Output(fmt.Sprintf("%s", typedOutputVal)) + case []interface{}: + c.Ui.Output(fmt.Sprintf("%s", formatNestedList("", typedOutputVal))) + case map[string]interface{}: + c.Ui.Output(fmt.Sprintf("%s", formatNestedMap("", typedOutputVal))) + } return 0 } else { return 1 @@ -144,6 +160,23 @@ func (c *OutputCommand) Run(args []string) int { return 0 } +func formatNestedList(indent string, outputList []interface{}) string { + outputBuf := new(bytes.Buffer) + outputBuf.WriteString(fmt.Sprintf("%s[", indent)) + + lastIdx := len(outputList) - 1 + + for i, value := range outputList { + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, " ", value)) + if i != lastIdx { + outputBuf.WriteString(",") + } + } + + outputBuf.WriteString(fmt.Sprintf("\n%s]", indent)) + return strings.TrimPrefix(outputBuf.String(), "\n") +} + func formatListOutput(indent, outputName string, outputList []interface{}) string { keyIndent := "" @@ -151,11 +184,26 @@ func formatListOutput(indent, outputName string, outputList []interface{}) strin if outputName != "" { outputBuf.WriteString(fmt.Sprintf("%s%s = [", indent, outputName)) - keyIndent = " " + keyIndent = " " } - for _, value := range outputList { - outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) + lastIdx := len(outputList) - 1 + + for i, value := range outputList { + switch typedValue := value.(type) { + case string: + outputBuf.WriteString(fmt.Sprintf("\n%s%s%s", indent, keyIndent, value)) + case []interface{}: + outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, + formatNestedList(indent+keyIndent, typedValue))) + case map[string]interface{}: + outputBuf.WriteString(fmt.Sprintf("\n%s%s", indent, + formatNestedMap(indent+keyIndent, typedValue))) + } + + if lastIdx != i { + outputBuf.WriteString(",") + } } if outputName != "" { @@ -169,6 +217,30 @@ func formatListOutput(indent, outputName string, outputList []interface{}) strin return strings.TrimPrefix(outputBuf.String(), "\n") } +func formatNestedMap(indent string, outputMap map[string]interface{}) string { + ks := make([]string, 0, len(outputMap)) + for k, _ := range outputMap { + ks = append(ks, k) + } + sort.Strings(ks) + + outputBuf := new(bytes.Buffer) + outputBuf.WriteString(fmt.Sprintf("%s{", indent)) + + lastIdx := len(outputMap) - 1 + for i, k := range ks { + v := outputMap[k] + outputBuf.WriteString(fmt.Sprintf("\n%s%s = %v", indent+" ", k, v)) + + if lastIdx != i { + outputBuf.WriteString(",") + } + } + + outputBuf.WriteString(fmt.Sprintf("\n%s}", indent)) + + return 
strings.TrimPrefix(outputBuf.String(), "\n") +} func formatMapOutput(indent, outputName string, outputMap map[string]interface{}) string { ks := make([]string, 0, len(outputMap)) for k, _ := range outputMap { From b4048dfc1da2a36963850d6357e6921a340eccea Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 13 Jul 2016 10:38:19 -0600 Subject: [PATCH 0261/1238] core: Add -json flag to `terraform output` This commit removes the ability to index into complex output types using `terraform output a_list 1` (for example), and adds a `-json` flag to the `terraform output` command, such that the output can be piped through a post-processor such as jq or json. This removes the need to allow arbitrary traversal of nested structures. It also adds tests of human readable ("normal") output with nested lists and maps, and of the new JSON output. --- command/output.go | 105 ++++++---------- command/output_test.go | 119 +++++++++++++++--- .../source/docs/commands/output.html.markdown | 3 + 3 files changed, 145 insertions(+), 82 deletions(-) diff --git a/command/output.go b/command/output.go index 23347a4dd..9054dfb4d 100644 --- a/command/output.go +++ b/command/output.go @@ -2,10 +2,10 @@ package command import ( "bytes" + "encoding/json" "flag" "fmt" "sort" - "strconv" "strings" ) @@ -19,7 +19,10 @@ func (c *OutputCommand) Run(args []string) int { args = c.Meta.process(args, false) var module string + var jsonOutput bool + cmdFlags := flag.NewFlagSet("output", flag.ContinueOnError) + cmdFlags.BoolVar(&jsonOutput, "json", false, "json") cmdFlags.StringVar(&c.Meta.statePath, "state", DefaultStateFilename, "path") cmdFlags.StringVar(&module, "module", "", "module") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } @@ -29,7 +32,7 @@ func (c *OutputCommand) Run(args []string) int { } args = cmdFlags.Args() - if len(args) > 2 { + if len(args) > 1 { c.Ui.Error( "The output command expects exactly one argument with the name\n" + "of an output variable or no arguments to show all outputs.\n") @@ -42,11 +45,6 @@ func (c *OutputCommand) Run(args []string) int { name = args[0] } - index := "" - if len(args) > 1 { - index = args[1] - } - stateStore, err := c.Meta.State() if err != nil { c.Ui.Error(fmt.Sprintf("Error reading state: %s", err)) @@ -81,8 +79,18 @@ func (c *OutputCommand) Run(args []string) int { } if name == "" { - c.Ui.Output(outputsAsString(state, nil, false)) - return 0 + if jsonOutput { + jsonOutputs, err := json.MarshalIndent(mod.Outputs, "", " ") + if err != nil { + return 1 + } + + c.Ui.Output(string(jsonOutputs)) + return 0 + } else { + c.Ui.Output(outputsAsString(state, nil, false)) + return 0 + } } v, ok := mod.Outputs[name] @@ -95,66 +103,28 @@ func (c *OutputCommand) Run(args []string) int { return 1 } - switch output := v.Value.(type) { - case string: - c.Ui.Output(output) - return 0 - case []interface{}: - if index == "" { - c.Ui.Output(formatListOutput("", "", output)) - break - } - - indexInt, err := strconv.Atoi(index) + if jsonOutput { + jsonOutputs, err := json.MarshalIndent(v, "", " ") if err != nil { - c.Ui.Error(fmt.Sprintf( - "The index %q requested is not valid for the list output\n"+ - "%q - indices must be numeric, and in the range 0-%d", index, name, - len(output)-1)) - break - } - - if indexInt < 0 || indexInt >= len(output) { - c.Ui.Error(fmt.Sprintf( - "The index %d requested is not valid for the list output\n"+ - "%q - indices must be in the range 0-%d", indexInt, name, - len(output)-1)) - break - } - - outputVal := output[indexInt] - switch typedOutputVal := 
outputVal.(type) { - case string: - c.Ui.Output(fmt.Sprintf("%s", typedOutputVal)) - case []interface{}: - c.Ui.Output(fmt.Sprintf("%s", formatNestedList("", typedOutputVal))) - case map[string]interface{}: - c.Ui.Output(fmt.Sprintf("%s", formatNestedMap("", typedOutputVal))) - } - - return 0 - case map[string]interface{}: - if index == "" { - c.Ui.Output(formatMapOutput("", "", output)) - break - } - - if value, ok := output[index]; ok { - switch typedOutputVal := value.(type) { - case string: - c.Ui.Output(fmt.Sprintf("%s", typedOutputVal)) - case []interface{}: - c.Ui.Output(fmt.Sprintf("%s", formatNestedList("", typedOutputVal))) - case map[string]interface{}: - c.Ui.Output(fmt.Sprintf("%s", formatNestedMap("", typedOutputVal))) - } - return 0 - } else { return 1 } - default: - c.Ui.Error(fmt.Sprintf("Unknown output type: %T", v.Type)) - return 1 + + c.Ui.Output(string(jsonOutputs)) + } else { + switch output := v.Value.(type) { + case string: + c.Ui.Output(output) + return 0 + case []interface{}: + c.Ui.Output(formatListOutput("", "", output)) + return 0 + case map[string]interface{}: + c.Ui.Output(formatMapOutput("", "", output)) + return 0 + default: + c.Ui.Error(fmt.Sprintf("Unknown output type: %T", v.Type)) + return 1 + } } return 0 @@ -289,6 +259,9 @@ Options: -module=name If specified, returns the outputs for a specific module + -json If specified, machine readable output will be + printed in JSON format + ` return strings.TrimSpace(helpText) } diff --git a/command/output_test.go b/command/output_test.go index c553ff5aa..1487d41cb 100644 --- a/command/output_test.go +++ b/command/output_test.go @@ -14,10 +14,10 @@ import ( func TestOutput(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, @@ -53,19 +53,19 @@ func TestOutput(t *testing.T) { func TestModuleOutput(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, }, }, - &terraform.ModuleState{ + { Path: []string{"root", "my_module"}, Outputs: map[string]*terraform.OutputState{ - "blah": &terraform.OutputState{ + "blah": { Value: "tastatur", Type: "string", }, @@ -100,13 +100,100 @@ func TestModuleOutput(t *testing.T) { } } +func TestOutput_nestedListAndMap(t *testing.T) { + originalState := &terraform.State{ + Modules: []*terraform.ModuleState{ + { + Path: []string{"root"}, + Outputs: map[string]*terraform.OutputState{ + "foo": { + Value: []interface{}{ + map[string]interface{}{ + "key": "value", + "key2": "value2", + }, + map[string]interface{}{ + "key": "value", + }, + }, + Type: "list", + }, + }, + }, + }, + } + + statePath := testStateFile(t, originalState) + + ui := new(cli.MockUi) + c := &OutputCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(testProvider()), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "foo = [\n {\n key = value,\n key2 = value2\n },\n {\n key = value\n }\n]" + if actual != expected { + t.Fatalf("bad:\n%#v\n%#v", expected, actual) + } +} + +func TestOutput_json(t *testing.T) { + originalState := 
&terraform.State{ + Modules: []*terraform.ModuleState{ + { + Path: []string{"root"}, + Outputs: map[string]*terraform.OutputState{ + "foo": { + Value: "bar", + Type: "string", + }, + }, + }, + }, + } + + statePath := testStateFile(t, originalState) + + ui := new(cli.MockUi) + c := &OutputCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(testProvider()), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "-json", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + expected := "{\n \"foo\": {\n \"sensitive\": false,\n \"type\": \"string\",\n \"value\": \"bar\"\n }\n}" + if actual != expected { + t.Fatalf("bad:\n%#v\n%#v", expected, actual) + } +} + func TestMissingModuleOutput(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, @@ -139,10 +226,10 @@ func TestMissingModuleOutput(t *testing.T) { func TestOutput_badVar(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, @@ -173,14 +260,14 @@ func TestOutput_badVar(t *testing.T) { func TestOutput_blank(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, - "name": &terraform.OutputState{ + "name": { Value: "john-doe", Type: "string", }, @@ -272,7 +359,7 @@ func TestOutput_noState(t *testing.T) { func TestOutput_noVars(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{}, }, @@ -301,10 +388,10 @@ func TestOutput_noVars(t *testing.T) { func TestOutput_stateDefault(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ - &terraform.ModuleState{ + { Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ - "foo": &terraform.OutputState{ + "foo": { Value: "bar", Type: "string", }, diff --git a/website/source/docs/commands/output.html.markdown b/website/source/docs/commands/output.html.markdown index f1a70394e..b284c79a5 100644 --- a/website/source/docs/commands/output.html.markdown +++ b/website/source/docs/commands/output.html.markdown @@ -20,6 +20,9 @@ current directory for the state file to query. The command-line flags are all optional. The list of available flags are: +* `-json` - If specified, the outputs are formatted as a JSON object, with + a key per output. This can be piped into tools such as `jq` for further + processing. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". * `-module=module_name` - The module path which has needed output. By default this is the root path. Other modules can be specified by From 56aadab115ef79e72d0c5405516056f822988c8d Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 13 Jul 2016 11:23:56 -0600 Subject: [PATCH 0262/1238] core: Add context test for module var from splat This adds additional coverage of the situation reported in #7195 to prevent against regression. 
The actual fix was in 2356afd, in response to #7143. --- terraform/context_plan_test.go | 27 +++++++++++++++++++ terraform/terraform_test.go | 22 +++++++++++++++ .../plan-module-variable-from-splat/main.tf | 9 +++++++ .../mod/main.tf | 12 +++++++++ 4 files changed, 70 insertions(+) create mode 100644 terraform/test-fixtures/plan-module-variable-from-splat/main.tf create mode 100644 terraform/test-fixtures/plan-module-variable-from-splat/mod/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 09ed1bd82..2bccc40e5 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2365,3 +2365,30 @@ func TestContext2Plan_computedValueInMap(t *testing.T) { t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) } } + +func TestContext2Plan_moduleVariableFromSplat(t *testing.T) { + m := testModule(t, "plan-module-variable-from-splat") + p := testProvider("aws") + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(plan.String()) + expected := strings.TrimSpace(testTerraformPlanModuleVariableFromSplat) + if actual != expected { + t.Fatalf("bad:\n%s\n\nexpected\n\n%s", actual, expected) + } +} diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 333e72767..6be32ea03 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -1371,3 +1371,25 @@ STATE: ` + +const testTerraformPlanModuleVariableFromSplat = ` +DIFF: + +module.mod1: + CREATE: aws_instance.test.0 + thing: "" => "doesnt" + type: "" => "aws_instance" + CREATE: aws_instance.test.1 + thing: "" => "doesnt" + type: "" => "aws_instance" +module.mod2: + CREATE: aws_instance.test.0 + thing: "" => "doesnt" + type: "" => "aws_instance" + CREATE: aws_instance.test.1 + thing: "" => "doesnt" + type: "" => "aws_instance" + +STATE: + +` diff --git a/terraform/test-fixtures/plan-module-variable-from-splat/main.tf b/terraform/test-fixtures/plan-module-variable-from-splat/main.tf new file mode 100644 index 000000000..2af78acd5 --- /dev/null +++ b/terraform/test-fixtures/plan-module-variable-from-splat/main.tf @@ -0,0 +1,9 @@ +module "mod1" { + source = "mod" + param = ["this", "one", "works"] +} + +module "mod2" { + source = "mod" + param = ["${module.mod1.out_from_splat[0]}"] +} diff --git a/terraform/test-fixtures/plan-module-variable-from-splat/mod/main.tf b/terraform/test-fixtures/plan-module-variable-from-splat/mod/main.tf new file mode 100644 index 000000000..28d9175d2 --- /dev/null +++ b/terraform/test-fixtures/plan-module-variable-from-splat/mod/main.tf @@ -0,0 +1,12 @@ +variable "param" { + type = "list" +} + +resource "aws_instance" "test" { + count = "2" + thing = "doesnt" +} + +output "out_from_splat" { + value = ["${aws_instance.test.*.thing}"] +} From 28438daeb409554fc500fc021b06889e8946adf8 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 12 Jul 2016 12:10:19 -0600 Subject: [PATCH 0263/1238] provider/aws: Fix IDs in aws_iam_policy_document We cannot use the "id" field to represent policy ID, because it is used internally by Terraform. Also change the "id" field within a statement to "sid" for consistency with the generated JSON. 
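To make the rename concrete, here is a hypothetical, trimmed-down sketch of the policy model. The struct and field names below are illustrative stand-ins, not the provider's real `IAMPolicyDoc` and `IAMPolicyStatement` types; the point is that the document-level field marshals as "Id" and the per-statement field as "Sid", which is why the configuration keys become `policy_id` and `sid`:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative stand-ins for the provider's policy model types.
type statement struct {
	Sid    string   `json:",omitempty"`
	Effect string   `json:",omitempty"`
	Action []string `json:",omitempty"`
}

type policyDoc struct {
	Version   string      `json:",omitempty"`
	Id        string      `json:",omitempty"`
	Statement []statement `json:"Statement"`
}

func main() {
	doc := policyDoc{
		Version: "2012-10-17",
		Id:      "policy_id",
		Statement: []statement{
			{Sid: "1", Effect: "Allow", Action: []string{"s3:ListAllMyBuckets"}},
		},
	}

	out, err := json.MarshalIndent(doc, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```

In configuration these surface as the `policy_id` argument on the data source and the `sid` argument inside each `statement` block, as the updated documentation and acceptance test in this patch show.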
--- .../data_source_aws_iam_policy_document.go | 28 +++++++++++-------- ...ata_source_aws_iam_policy_document_test.go | 6 +++- builtin/providers/aws/iam_policy_model.go | 2 +- .../aws/d/iam_policy_document.html.markdown | 5 ++-- 4 files changed, 25 insertions(+), 16 deletions(-) diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document.go b/builtin/providers/aws/data_source_aws_iam_policy_document.go index ef045df80..8d5051f77 100644 --- a/builtin/providers/aws/data_source_aws_iam_policy_document.go +++ b/builtin/providers/aws/data_source_aws_iam_policy_document.go @@ -24,20 +24,20 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { Read: dataSourceAwsIamPolicyDocumentRead, Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ + "policy_id": { Type: schema.TypeString, Optional: true, }, - "statement": &schema.Schema{ - Type: schema.TypeSet, + "statement": { + Type: schema.TypeList, Required: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ + "sid": { Type: schema.TypeString, Optional: true, }, - "effect": &schema.Schema{ + "effect": { Type: schema.TypeString, Optional: true, Default: "Allow", @@ -48,20 +48,20 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { "not_resources": setOfString, "principals": dataSourceAwsIamPolicyPrincipalSchema(), "not_principals": dataSourceAwsIamPolicyPrincipalSchema(), - "condition": &schema.Schema{ + "condition": { Type: schema.TypeSet, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ - "test": &schema.Schema{ + "test": { Type: schema.TypeString, Required: true, }, - "variable": &schema.Schema{ + "variable": { Type: schema.TypeString, Required: true, }, - "values": &schema.Schema{ + "values": { Type: schema.TypeSet, Required: true, Elem: &schema.Schema{ @@ -74,7 +74,7 @@ func dataSourceAwsIamPolicyDocument() *schema.Resource { }, }, }, - "json": &schema.Schema{ + "json": { Type: schema.TypeString, Computed: true, }, @@ -87,11 +87,11 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} Version: "2012-10-17", } - if policyId, hasPolicyId := d.GetOk("id"); hasPolicyId { + if policyId, hasPolicyId := d.GetOk("policy_id"); hasPolicyId { doc.Id = policyId.(string) } - var cfgStmts = d.Get("statement").(*schema.Set).List() + var cfgStmts = d.Get("statement").([]interface{}) stmts := make([]*IAMPolicyStatement, len(cfgStmts)) doc.Statements = stmts for i, stmtI := range cfgStmts { @@ -100,6 +100,10 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} Effect: cfgStmt["effect"].(string), } + if sid, ok := cfgStmt["sid"]; ok { + stmt.Sid = sid.(string) + } + if actions := cfgStmt["actions"].(*schema.Set).List(); len(actions) > 0 { stmt.Actions = iamPolicyDecodeConfigStringList(actions) } diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go b/builtin/providers/aws/data_source_aws_iam_policy_document_test.go index edd0f8d40..8a2210265 100644 --- a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go +++ b/builtin/providers/aws/data_source_aws_iam_policy_document_test.go @@ -16,7 +16,7 @@ func TestAccAWSIAMPolicyDocument(t *testing.T) { PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, Steps: []resource.TestStep{ - resource.TestStep{ + { Config: testAccAWSIAMPolicyDocumentConfig, Check: resource.ComposeTestCheckFunc( testAccCheckStateValue( @@ -52,7 +52,9 @@ func testAccCheckStateValue(id, name, value string) resource.TestCheckFunc { var 
testAccAWSIAMPolicyDocumentConfig = ` data "aws_iam_policy_document" "test" { + policy_id = "policy_id" statement { + sid = "1" actions = [ "s3:ListAllMyBuckets", "s3:GetBucketLocation", @@ -110,8 +112,10 @@ data "aws_iam_policy_document" "test" { var testAccAWSIAMPolicyDocumentExpectedJSON = `{ "Version": "2012-10-17", + "Id": "policy_id", "Statement": [ { + "Sid": "1", "Effect": "Allow", "Action": [ "s3:GetBucketLocation", diff --git a/builtin/providers/aws/iam_policy_model.go b/builtin/providers/aws/iam_policy_model.go index e90a08fe4..56ffc9d5c 100644 --- a/builtin/providers/aws/iam_policy_model.go +++ b/builtin/providers/aws/iam_policy_model.go @@ -5,8 +5,8 @@ import ( ) type IAMPolicyDoc struct { - Id string `json:",omitempty"` Version string `json:",omitempty"` + Id string `json:",omitempty"` Statements []*IAMPolicyStatement `json:"Statement"` } diff --git a/website/source/docs/providers/aws/d/iam_policy_document.html.markdown b/website/source/docs/providers/aws/d/iam_policy_document.html.markdown index 036765202..f2e01fe13 100644 --- a/website/source/docs/providers/aws/d/iam_policy_document.html.markdown +++ b/website/source/docs/providers/aws/d/iam_policy_document.html.markdown @@ -17,6 +17,7 @@ such as the `aws_iam_policy` resource. ``` data "aws_iam_policy_document" "example" { statement { + sid = "1" actions = [ "s3:ListAllMyBuckets", "s3:GetBucketLocation", @@ -71,14 +72,14 @@ valid to use literal JSON strings within your configuration, or to use the The following arguments are supported: -* `id` (Optional) - An ID for the policy document. +* `policy_id` (Optional) - An ID for the policy document. * `statement` (Required) - A nested configuration block (described below) configuring one *statement* to be included in the policy document. Each document configuration must have one or more `statement` blocks, which each accept the following arguments: -* `id` (Optional) - An ID for the policy statement. +* `sid` (Optional) - An ID for the policy statement. * `effect` (Optional) - Either "Allow" or "Deny", to specify whether this statement allows or denies the given actions. The default is "Allow". * `actions` (Optional) - A list of actions that this statement either allows From 821d9d8b137033e95485cb36e9f57a357d19eb1f Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 13 Jul 2016 12:16:04 -0600 Subject: [PATCH 0264/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1311ffebf..44eec8227 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -32,6 +32,7 @@ FEATURES: * **State Import** allows a way to import existing resources into Terraform state for many types of resource. Initial coverage of AWS is quite high, and it is straightforward to add support for new resources. 
* **New Command:** `terraform state` to provide access to a variety of state manipulation functions [GH-5811] + * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs [GH-7608] * **New Data Source:** `aws_ami` [GH-6911] * **New Data Source:** `aws_availability_zones` [GH-6805] * **New Data Source:** `aws_iam_policy_document` [GH-6881] From 4d5162ae2c8191a6f1e5b5a6e93a7e0539de8b0d Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 13 Jul 2016 12:55:35 -0600 Subject: [PATCH 0265/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44eec8227..440d2205a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -159,6 +159,7 @@ BUG FIXES: * core: Fix a crash during eval when we're upgrading an empty state [GH-7403] * core: Honor the `-state-out` flag when applying with a plan file [GH-7443] * core: Fix a panic when a `terraform_remote_state` data source doesn't exist [GH-7464] + * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources [GH-7563] * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected [GH-7530] * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources [GH-6829] * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation [GH-7227] From 0ce6337a2a394dbfdb8e508f9b2473cb8525fb79 Mon Sep 17 00:00:00 2001 From: Joonas Bergius Date: Wed, 13 Jul 2016 13:48:00 -0600 Subject: [PATCH 0266/1238] docs: Add digitalocean_tag resource to the sidebar (#7629) --- website/source/layouts/digitalocean.erb | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/layouts/digitalocean.erb b/website/source/layouts/digitalocean.erb index a7855e821..701ed3597 100644 --- a/website/source/layouts/digitalocean.erb +++ b/website/source/layouts/digitalocean.erb @@ -32,6 +32,9 @@ > digitalocean_ssh_key + > + digitalocean_tag + > digitalocean_volume From 9081cabd6ee20258c9c6bb9a70697ee70075b307 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Wed, 13 Jul 2016 22:03:41 +0200 Subject: [PATCH 0267/1238] Add scaleway provider (#7331) * Add scaleway provider this PR allows the entire scaleway stack to be managed with terraform example usage looks like this: ``` provider "scaleway" { api_key = "snap" organization = "snip" } resource "scaleway_ip" "base" { server = "${scaleway_server.base.id}" } resource "scaleway_server" "base" { name = "test" # ubuntu 14.04 image = "aecaed73-51a5-4439-a127-6d8229847145" type = "C2S" } resource "scaleway_volume" "test" { name = "test" size_in_gb = 20 type = "l_ssd" } resource "scaleway_volume_attachment" "test" { server = "${scaleway_server.base.id}" volume = "${scaleway_volume.test.id}" } resource "scaleway_security_group" "base" { name = "public" description = "public gateway" } resource "scaleway_security_group_rule" "http-ingress" { security_group = "${scaleway_security_group.base.id}" action = "accept" direction = "inbound" ip_range = "0.0.0.0/0" protocol = "TCP" port = 80 } resource "scaleway_security_group_rule" "http-egress" { security_group = "${scaleway_security_group.base.id}" action = "accept" direction = "outbound" ip_range = "0.0.0.0/0" protocol = "TCP" port = 80 } ``` Note that volume attachments require the server to be stopped, which can lead to downtimes of you attach new volumes to already used servers * Update IP read to handle 404 
gracefully * Read back resource on update * Ensure IP detachment works as expected Sadly this is not part of the official scaleway api just yet * Adjust detachIP helper based on feedback from @QuentinPerez in https://github.com/scaleway/scaleway-cli/pull/378 * Cleanup documentation * Rename api_key to access_key following @stack72 suggestion and rename the provider api_key for more clarity * Make tests less chatty by using custom logger --- builtin/bins/provider-scaleway/main.go | 12 + builtin/providers/scaleway/config.go | 62 + builtin/providers/scaleway/helpers.go | 101 + builtin/providers/scaleway/provider.go | 46 + builtin/providers/scaleway/provider_test.go | 38 + .../scaleway/resource_scaleway_ip.go | 86 + .../scaleway/resource_scaleway_ip_test.go | 140 + .../resource_scaleway_security_group.go | 118 + .../resource_scaleway_security_group_rule.go | 162 + ...ource_scaleway_security_group_rule_test.go | 158 + .../resource_scaleway_security_group_test.go | 104 + .../scaleway/resource_scaleway_server.go | 183 ++ .../scaleway/resource_scaleway_server_test.go | 113 + .../scaleway/resource_scaleway_volume.go | 127 + .../resource_scaleway_volume_attachment.go | 201 ++ ...esource_scaleway_volume_attachment_test.go | 93 + .../scaleway/resource_scaleway_volume_test.go | 107 + command/internal_plugin_list.go | 2 + vendor/github.com/moul/anonuuid/LICENSE | 22 + vendor/github.com/moul/anonuuid/README.md | 170 ++ vendor/github.com/moul/anonuuid/anonuuid.go | 229 ++ .../github.com/renstrom/fuzzysearch/LICENSE | 21 + .../renstrom/fuzzysearch/fuzzy/fuzzy.go | 167 ++ .../renstrom/fuzzysearch/fuzzy/levenshtein.go | 43 + .../scaleway/scaleway-cli/LICENSE.md | 22 + .../scaleway/scaleway-cli/pkg/api/README.md | 25 + .../scaleway/scaleway-cli/pkg/api/api.go | 2667 +++++++++++++++++ .../scaleway/scaleway-cli/pkg/api/api_test.go | 21 + .../scaleway/scaleway-cli/pkg/api/cache.go | 731 +++++ .../scaleway/scaleway-cli/pkg/api/logger.go | 49 + .../scaleway-cli/pkg/scwversion/version.go | 16 + .../pkg/scwversion/version_test.go | 14 + .../providers/scaleway/index.html.markdown | 90 + .../providers/scaleway/r/ip.html.markdown | 34 + .../scaleway/r/security_group.html.markdown | 36 + .../r/security_group_rule.html.markdown | 51 + .../providers/scaleway/r/server.html.markdown | 38 + .../providers/scaleway/r/volume.html.markdown | 44 + .../r/volume_attachment.html.markdown | 48 + website/source/layouts/docs.erb | 4 + website/source/layouts/scaleway.erb | 41 + 41 files changed, 6436 insertions(+) create mode 100644 builtin/bins/provider-scaleway/main.go create mode 100644 builtin/providers/scaleway/config.go create mode 100644 builtin/providers/scaleway/helpers.go create mode 100644 builtin/providers/scaleway/provider.go create mode 100644 builtin/providers/scaleway/provider_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_ip.go create mode 100644 builtin/providers/scaleway/resource_scaleway_ip_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_security_group.go create mode 100644 builtin/providers/scaleway/resource_scaleway_security_group_rule.go create mode 100644 builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_security_group_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_server.go create mode 100644 builtin/providers/scaleway/resource_scaleway_server_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_volume.go create mode 100644 
builtin/providers/scaleway/resource_scaleway_volume_attachment.go create mode 100644 builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go create mode 100644 builtin/providers/scaleway/resource_scaleway_volume_test.go create mode 100644 vendor/github.com/moul/anonuuid/LICENSE create mode 100644 vendor/github.com/moul/anonuuid/README.md create mode 100644 vendor/github.com/moul/anonuuid/anonuuid.go create mode 100644 vendor/github.com/renstrom/fuzzysearch/LICENSE create mode 100644 vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go create mode 100644 vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/LICENSE.md create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/api_test.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version.go create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version_test.go create mode 100644 website/source/docs/providers/scaleway/index.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/ip.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/security_group.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/security_group_rule.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/server.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/volume.html.markdown create mode 100644 website/source/docs/providers/scaleway/r/volume_attachment.html.markdown create mode 100644 website/source/layouts/scaleway.erb diff --git a/builtin/bins/provider-scaleway/main.go b/builtin/bins/provider-scaleway/main.go new file mode 100644 index 000000000..0d9754797 --- /dev/null +++ b/builtin/bins/provider-scaleway/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/scaleway" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: scaleway.Provider, + }) +} diff --git a/builtin/providers/scaleway/config.go b/builtin/providers/scaleway/config.go new file mode 100644 index 000000000..5d321344a --- /dev/null +++ b/builtin/providers/scaleway/config.go @@ -0,0 +1,62 @@ +package scaleway + +import ( + "fmt" + "log" + "net/http" + "os" + + "github.com/scaleway/scaleway-cli/pkg/api" + "github.com/scaleway/scaleway-cli/pkg/scwversion" +) + +// Config contains scaleway configuration values +type Config struct { + Organization string + APIKey string +} + +// Client contains scaleway api clients +type Client struct { + scaleway *api.ScalewayAPI +} + +// Client configures and returns a fully initialized Scaleway client +func (c *Config) Client() (*Client, error) { + api, err := api.NewScalewayAPI( + c.Organization, + c.APIKey, + scwversion.UserAgent(), + func(s *api.ScalewayAPI) { + s.Logger = newTerraformLogger() + }, + ) + if err != nil { + return nil, err + } + return &Client{api}, nil +} + +func newTerraformLogger() api.Logger { + return &terraformLogger{} +} + +type terraformLogger struct { +} + +func (l *terraformLogger) LogHTTP(r *http.Request) { + log.Printf("[DEBUG] %s %s\n", r.Method, r.URL.Path) +} +func (l *terraformLogger) 
Fatalf(format string, v ...interface{}) { + log.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) + os.Exit(1) +} +func (l *terraformLogger) Debugf(format string, v ...interface{}) { + log.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) +} +func (l *terraformLogger) Infof(format string, v ...interface{}) { + log.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) +} +func (l *terraformLogger) Warnf(format string, v ...interface{}) { + log.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) +} diff --git a/builtin/providers/scaleway/helpers.go b/builtin/providers/scaleway/helpers.go new file mode 100644 index 000000000..b1ba9ba9d --- /dev/null +++ b/builtin/providers/scaleway/helpers.go @@ -0,0 +1,101 @@ +package scaleway + +import ( + "fmt" + "log" + "net/http" + "time" + + "github.com/scaleway/scaleway-cli/pkg/api" +) + +// Bool returns a pointer to of the bool value passed in. +func Bool(val bool) *bool { + return &val +} + +// String returns a pointer to of the string value passed in. +func String(val string) *string { + return &val +} + +// DetachIP detaches an IP from a server +func DetachIP(s *api.ScalewayAPI, ipID string) error { + var update struct { + Address string `json:"address"` + ID string `json:"id"` + Organization string `json:"organization"` + } + + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + update.Address = ip.IP.Address + update.ID = ip.IP.ID + update.Organization = ip.IP.Organization + + resp, err := s.PutResponse(api.ComputeAPI, fmt.Sprintf("ips/%s", ipID), update) + if err != nil { + return err + } + if resp.StatusCode != http.StatusOK { + return err + } + resp.Body.Close() + return nil +} + +// NOTE copied from github.com/scaleway/scaleway-cli/pkg/api/helpers.go +// the helpers.go file pulls in quite a lot dependencies, and they're just convenience wrappers anyway + +func deleteServerSafe(s *api.ScalewayAPI, serverID string) error { + server, err := s.GetServer(serverID) + if err != nil { + return err + } + + if server.State != "stopped" { + if err := s.PostServerAction(serverID, "poweroff"); err != nil { + return err + } + if err := waitForServerState(s, serverID, "stopped"); err != nil { + return err + } + } + + if err := s.DeleteServer(serverID); err != nil { + return err + } + if rootVolume, ok := server.Volumes["0"]; ok { + if err := s.DeleteVolume(rootVolume.Identifier); err != nil { + return err + } + } + + return nil +} + +func waitForServerState(s *api.ScalewayAPI, serverID string, targetState string) error { + var server *api.ScalewayServer + var err error + + var currentState string + + for { + server, err = s.GetServer(serverID) + if err != nil { + return err + } + if currentState != server.State { + log.Printf("[DEBUG] Server changed state to %q\n", server.State) + currentState = server.State + } + if server.State == targetState { + break + } + time.Sleep(1 * time.Second) + } + + return nil +} diff --git a/builtin/providers/scaleway/provider.go b/builtin/providers/scaleway/provider.go new file mode 100644 index 000000000..f2d417b81 --- /dev/null +++ b/builtin/providers/scaleway/provider.go @@ -0,0 +1,46 @@ +package scaleway + +import ( + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +// Provider returns a terraform.ResourceProvider. 
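+// Credentials come from the access_key and organization attributes, which
+// default to the SCALEWAY_ACCESS_KEY and SCALEWAY_ORGANIZATION environment
+// variables; providerConfigure turns them into a Config and returns the
+// underlying Scaleway API client.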
+func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "access_key": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("SCALEWAY_ACCESS_KEY", nil), + Description: "The API key for Scaleway API operations.", + }, + "organization": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("SCALEWAY_ORGANIZATION", nil), + Description: "The Organization ID for Scaleway API operations.", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "scaleway_server": resourceScalewayServer(), + "scaleway_ip": resourceScalewayIP(), + "scaleway_security_group": resourceScalewaySecurityGroup(), + "scaleway_security_group_rule": resourceScalewaySecurityGroupRule(), + "scaleway_volume": resourceScalewayVolume(), + "scaleway_volume_attachment": resourceScalewayVolumeAttachment(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + Organization: d.Get("organization").(string), + APIKey: d.Get("access_key").(string), + } + + return config.Client() +} diff --git a/builtin/providers/scaleway/provider_test.go b/builtin/providers/scaleway/provider_test.go new file mode 100644 index 000000000..8bcc32b7c --- /dev/null +++ b/builtin/providers/scaleway/provider_test.go @@ -0,0 +1,38 @@ +package scaleway + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "scaleway": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("SCALEWAY_ORGANIZATION"); v == "" { + t.Fatal("SCALEWAY_ORGANIZATION must be set for acceptance tests") + } + if v := os.Getenv("SCALEWAY_ACCESS_KEY"); v == "" { + t.Fatal("SCALEWAY_ACCESS_KEY must be set for acceptance tests") + } +} diff --git a/builtin/providers/scaleway/resource_scaleway_ip.go b/builtin/providers/scaleway/resource_scaleway_ip.go new file mode 100644 index 000000000..b4fe7003f --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_ip.go @@ -0,0 +1,86 @@ +package scaleway + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func resourceScalewayIP() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewayIPCreate, + Read: resourceScalewayIPRead, + Update: resourceScalewayIPUpdate, + Delete: resourceScalewayIPDelete, + Schema: map[string]*schema.Schema{ + "server": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceScalewayIPCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + resp, err := scaleway.NewIP() + if err != nil { + return err + } + + d.SetId(resp.IP.ID) + return resourceScalewayIPUpdate(d, m) +} + +func resourceScalewayIPRead(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + 
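// A 404 from the API means the IP was removed outside Terraform; the ID is
+ // cleared below so the read succeeds and the address is planned for recreation.
+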
log.Printf("[DEBUG] Reading IP\n") + + resp, err := scaleway.GetIP(d.Id()) + if err != nil { + log.Printf("[DEBUG] Error reading ip: %q\n", err) + if serr, ok := err.(api.ScalewayAPIError); ok { + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err + } + + d.Set("ip", resp.IP.Address) + d.Set("server", resp.IP.Server.Identifier) + return nil +} + +func resourceScalewayIPUpdate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + if d.HasChange("server") { + if d.Get("server").(string) != "" { + log.Printf("[DEBUG] Attaching IP %q to server %q\n", d.Id(), d.Get("server").(string)) + if err := scaleway.AttachIP(d.Id(), d.Get("server").(string)); err != nil { + return err + } + } else { + log.Printf("[DEBUG] Detaching IP %q\n", d.Id()) + return DetachIP(scaleway, d.Id()) + } + } + + return resourceScalewayIPRead(d, m) +} + +func resourceScalewayIPDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + err := scaleway.DeleteIP(d.Id()) + if err != nil { + return err + } + d.SetId("") + return nil +} diff --git a/builtin/providers/scaleway/resource_scaleway_ip_test.go b/builtin/providers/scaleway/resource_scaleway_ip_test.go new file mode 100644 index 000000000..464817eff --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_ip_test.go @@ -0,0 +1,140 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccScalewayIP_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewayIPConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayIPExists("scaleway_ip.base"), + ), + }, + resource.TestStep{ + Config: testAccCheckScalewayIPAttachConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayIPExists("scaleway_ip.base"), + testAccCheckScalewayIPAttachment("scaleway_ip.base", func(serverID string) bool { + return serverID != "" + }, "attachment failed"), + ), + }, + resource.TestStep{ + Config: testAccCheckScalewayIPConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayIPExists("scaleway_ip.base"), + testAccCheckScalewayIPAttachment("scaleway_ip.base", func(serverID string) bool { + return serverID == "" + }, "detachment failed"), + ), + }, + }, + }) +} + +func testAccCheckScalewayIPDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + _, err := client.GetIP(rs.Primary.ID) + + if err == nil { + return fmt.Errorf("IP still exists") + } + } + + return nil +} + +func testAccCheckScalewayIPAttributes() resource.TestCheckFunc { + return func(s *terraform.State) error { + return nil + } +} + +func testAccCheckScalewayIPExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No IP ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + ip, err := client.GetIP(rs.Primary.ID) + + if err != nil { + return err + } + + if ip.IP.ID != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +func 
testAccCheckScalewayIPAttachment(n string, check func(string) bool, msg string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No IP ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + ip, err := client.GetIP(rs.Primary.ID) + + if err != nil { + return err + } + + if !check(ip.IP.Server.Identifier) { + return fmt.Errorf("IP check failed: %q", msg) + } + + return nil + } +} + +var testAccCheckScalewayIPConfig = ` +resource "scaleway_ip" "base" { +} +` + +var testAccCheckScalewayIPAttachConfig = fmt.Sprintf(` +resource "scaleway_server" "base" { + name = "test" + # ubuntu 14.04 + image = "%s" + type = "C1" + state = "stopped" +} + +resource "scaleway_ip" "base" { + server = "${scaleway_server.base.id}" +} +`, armImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_security_group.go b/builtin/providers/scaleway/resource_scaleway_security_group.go new file mode 100644 index 000000000..6c5da13d6 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_security_group.go @@ -0,0 +1,118 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func resourceScalewaySecurityGroup() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewaySecurityGroupCreate, + Read: resourceScalewaySecurityGroupRead, + Update: resourceScalewaySecurityGroupUpdate, + Delete: resourceScalewaySecurityGroupDelete, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + } +} + +func resourceScalewaySecurityGroupCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + req := api.ScalewayNewSecurityGroup{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Organization: scaleway.Organization, + } + + err := scaleway.PostSecurityGroup(req) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error creating security group: %q\n", serr.APIMessage) + } + + return err + } + + resp, err := scaleway.GetSecurityGroups() + if err != nil { + return err + } + + for _, group := range resp.SecurityGroups { + if group.Name == req.Name { + d.SetId(group.ID) + break + } + } + + if d.Id() == "" { + return fmt.Errorf("Failed to find created security group.") + } + + return resourceScalewaySecurityGroupRead(d, m) +} + +func resourceScalewaySecurityGroupRead(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + resp, err := scaleway.GetASecurityGroup(d.Id()) + + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error reading security group: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + + return err + } + + d.Set("name", resp.SecurityGroups.Name) + d.Set("description", resp.SecurityGroups.Description) + + return nil +} + +func resourceScalewaySecurityGroupUpdate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + var req = api.ScalewayNewSecurityGroup{ + Organization: scaleway.Organization, + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if err := scaleway.PutSecurityGroup(req, d.Id()); err != nil 
{ + log.Printf("[DEBUG] Error reading security group: %q\n", err) + + return err + } + + return resourceScalewaySecurityGroupRead(d, m) +} + +func resourceScalewaySecurityGroupDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + err := scaleway.DeleteSecurityGroup(d.Id()) + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go new file mode 100644 index 000000000..85c2f3575 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go @@ -0,0 +1,162 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func resourceScalewaySecurityGroupRule() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewaySecurityGroupRuleCreate, + Read: resourceScalewaySecurityGroupRuleRead, + Update: resourceScalewaySecurityGroupRuleUpdate, + Delete: resourceScalewaySecurityGroupRuleDelete, + Schema: map[string]*schema.Schema{ + "security_group": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "accept" && value != "drop" { + errors = append(errors, fmt.Errorf("%q must be one of 'accept', 'drop'", k)) + } + return + }, + }, + "direction": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "inbound" && value != "outbound" { + errors = append(errors, fmt.Errorf("%q must be one of 'inbound', 'outbound'", k)) + } + return + }, + }, + "ip_range": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "ICMP" && value != "TCP" && value != "UDP" { + errors = append(errors, fmt.Errorf("%q must be one of 'ICMP', 'TCP', 'UDP", k)) + } + return + }, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + } +} + +func resourceScalewaySecurityGroupRuleCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + req := api.ScalewayNewSecurityGroupRule{ + Action: d.Get("action").(string), + Direction: d.Get("direction").(string), + IPRange: d.Get("ip_range").(string), + Protocol: d.Get("protocol").(string), + DestPortFrom: d.Get("port").(int), + } + + err := scaleway.PostSecurityGroupRule(d.Get("security_group").(string), req) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error creating Security Group Rule: %q\n", serr.APIMessage) + } + + return err + } + + resp, err := scaleway.GetSecurityGroupRules(d.Get("security_group").(string)) + if err != nil { + return err + } + + for _, rule := range resp.Rules { + if rule.Action == req.Action && rule.Direction == req.Direction && rule.IPRange == req.IPRange && rule.Protocol == req.Protocol { + d.SetId(rule.ID) + break + } + } + + if d.Id() == "" { + return fmt.Errorf("Failed to find created security group rule") + } + + return resourceScalewaySecurityGroupRuleRead(d, m) +} + +func resourceScalewaySecurityGroupRuleRead(d 
*schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + rule, err := scaleway.GetASecurityGroupRule(d.Get("security_group").(string), d.Id()) + + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + + return err + } + + d.Set("action", rule.Rules.Action) + d.Set("direction", rule.Rules.Direction) + d.Set("ip_range", rule.Rules.IPRange) + d.Set("protocol", rule.Rules.Protocol) + d.Set("port", rule.Rules.DestPortFrom) + + return nil +} + +func resourceScalewaySecurityGroupRuleUpdate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + var req = api.ScalewayNewSecurityGroupRule{ + Action: d.Get("action").(string), + Direction: d.Get("direction").(string), + IPRange: d.Get("ip_range").(string), + Protocol: d.Get("protocol").(string), + DestPortFrom: d.Get("port").(int), + } + + if err := scaleway.PutSecurityGroupRule(req, d.Get("security_group").(string), d.Id()); err != nil { + log.Printf("[DEBUG] error updating Security Group Rule: %q", err) + + return err + } + + return resourceScalewaySecurityGroupRuleRead(d, m) +} + +func resourceScalewaySecurityGroupRuleDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + err := scaleway.DeleteSecurityGroupRule(d.Get("security_group").(string), d.Id()) + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go new file mode 100644 index 000000000..aeafa41b6 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_security_group_rule_test.go @@ -0,0 +1,158 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func TestAccScalewaySecurityGroupRule_Basic(t *testing.T) { + var group api.ScalewaySecurityGroups + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewaySecurityGroupRuleDestroy(&group), + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewaySecurityGroupRuleConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewaySecurityGroupsExists("scaleway_security_group.base", &group), + resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "action", "drop"), + resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "direction", "inbound"), + resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "ip_range", "0.0.0.0/0"), + resource.TestCheckResourceAttr("scaleway_security_group_rule.http", "protocol", "TCP"), + testAccCheckScalewaySecurityGroupRuleExists("scaleway_security_group_rule.http", &group), + testAccCheckScalewaySecurityGroupRuleAttributes("scaleway_security_group_rule.http", &group), + ), + }, + }, + }) +} + +func testAccCheckScalewaySecurityGroupsExists(n string, group *api.ScalewaySecurityGroups) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Security Group Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group is set") + } + + conn := testAccProvider.Meta().(*Client).scaleway 
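+ // Fetch the group via the API and copy it into *group so the rule checks
+ // that follow can reference its ID.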
+ resp, err := conn.GetASecurityGroup(rs.Primary.ID) + + if err != nil { + return err + } + + if resp.SecurityGroups.ID == rs.Primary.ID { + *group = resp.SecurityGroups + return nil + } + + return fmt.Errorf("Security Group not found") + } +} + +func testAccCheckScalewaySecurityGroupRuleDestroy(group *api.ScalewaySecurityGroups) func(*terraform.State) error { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + _, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) + + if err == nil { + return fmt.Errorf("Security Group still exists") + } + } + + return nil + } +} + +func testAccCheckScalewaySecurityGroupRuleAttributes(n string, group *api.ScalewaySecurityGroups) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Unknown resource: %s", n) + } + + client := testAccProvider.Meta().(*Client).scaleway + rule, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) + if err != nil { + return err + } + + if rule.Rules.Action != "drop" { + return fmt.Errorf("Wrong rule action") + } + if rule.Rules.Direction != "inbound" { + return fmt.Errorf("wrong rule direction") + } + if rule.Rules.IPRange != "0.0.0.0/0" { + return fmt.Errorf("wrong rule IP Range") + } + if rule.Rules.Protocol != "TCP" { + return fmt.Errorf("wrong rule protocol") + } + if rule.Rules.DestPortFrom != 80 { + return fmt.Errorf("Wrong port") + } + + return nil + } +} + +func testAccCheckScalewaySecurityGroupRuleExists(n string, group *api.ScalewaySecurityGroups) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Security Group Rule Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group Rule ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + rule, err := client.GetASecurityGroupRule(group.ID, rs.Primary.ID) + + if err != nil { + return err + } + + if rule.Rules.ID != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +var testAccCheckScalewaySecurityGroupRuleConfig = ` +resource "scaleway_security_group" "base" { + name = "public" + description = "public gateway" +} + +resource "scaleway_security_group_rule" "http" { + security_group = "${scaleway_security_group.base.id}" + + action = "drop" + direction = "inbound" + ip_range = "0.0.0.0/0" + protocol = "TCP" + port = 80 +} +` diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_test.go b/builtin/providers/scaleway/resource_scaleway_security_group_test.go new file mode 100644 index 000000000..22d351305 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_security_group_test.go @@ -0,0 +1,104 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccScalewaySecurityGroup_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewaySecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewaySecurityGroupConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewaySecurityGroupExists("scaleway_security_group.base"), + 
testAccCheckScalewaySecurityGroupAttributes("scaleway_security_group.base"), + resource.TestCheckResourceAttr("scaleway_security_group.base", "name", "public"), + resource.TestCheckResourceAttr("scaleway_security_group.base", "description", "public gateway"), + ), + }, + }, + }) +} + +func testAccCheckScalewaySecurityGroupDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + _, err := client.GetASecurityGroup(rs.Primary.ID) + + if err == nil { + return fmt.Errorf("Security Group still exists") + } + } + + return nil +} + +func testAccCheckScalewaySecurityGroupAttributes(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Unknown resource: %s", n) + } + + client := testAccProvider.Meta().(*Client).scaleway + group, err := client.GetASecurityGroup(rs.Primary.ID) + if err != nil { + return err + } + + if group.SecurityGroups.Name != "public" { + return fmt.Errorf("Security Group has wrong name") + } + if group.SecurityGroups.Description != "public gateway" { + return fmt.Errorf("Security Group has wrong description") + } + + return nil + } +} + +func testAccCheckScalewaySecurityGroupExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Security Group ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + group, err := client.GetASecurityGroup(rs.Primary.ID) + + if err != nil { + return err + } + + if group.SecurityGroups.ID != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +var testAccCheckScalewaySecurityGroupConfig = ` +resource "scaleway_security_group" "base" { + name = "public" + description = "public gateway" +} +` diff --git a/builtin/providers/scaleway/resource_scaleway_server.go b/builtin/providers/scaleway/resource_scaleway_server.go new file mode 100644 index 000000000..0dddfa8e7 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_server.go @@ -0,0 +1,183 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func resourceScalewayServer() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewayServerCreate, + Read: resourceScalewayServerRead, + Update: resourceScalewayServerUpdate, + Delete: resourceScalewayServerDelete, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "image": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "bootscript": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "tags": &schema.Schema{ + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + }, + "ipv4_address_private": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "ipv4_address_public": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "dynamic_ip_required": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + 
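// state_detail is reported by the API alongside state and is read-only.
+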
"state_detail": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceScalewayServerCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + image := d.Get("image").(string) + var server = api.ScalewayServerDefinition{ + Name: d.Get("name").(string), + Image: String(image), + Organization: scaleway.Organization, + } + + server.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) + server.CommercialType = d.Get("type").(string) + + if bootscript, ok := d.GetOk("bootscript"); ok { + server.Bootscript = String(bootscript.(string)) + } + + if tags, ok := d.GetOk("tags"); ok { + server.Tags = tags.([]string) + } + + id, err := scaleway.PostServer(server) + if err != nil { + return err + } + + d.SetId(id) + if d.Get("state").(string) != "stopped" { + err = scaleway.PostServerAction(id, "poweron") + if err != nil { + return err + } + + err = waitForServerState(scaleway, id, "running") + } + + if err != nil { + return err + } + + return resourceScalewayServerRead(d, m) +} + +func resourceScalewayServerRead(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + server, err := scaleway.GetServer(d.Id()) + + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error reading server: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + + return err + } + + d.Set("ipv4_address_private", server.PrivateIP) + d.Set("ipv4_address_public", server.PublicAddress.IP) + d.Set("state", server.State) + d.Set("state_detail", server.StateDetail) + + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": server.PublicAddress.IP, + }) + + return nil +} + +func resourceScalewayServerUpdate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + var req api.ScalewayServerPatchDefinition + + if d.HasChange("name") { + name := d.Get("name").(string) + req.Name = &name + } + + if d.HasChange("dynamic_ip_required") { + req.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) + } + + if err := scaleway.PatchServer(d.Id(), req); err != nil { + return fmt.Errorf("Failed patching scaleway server: %q", err) + } + + return resourceScalewayServerRead(d, m) +} + +func resourceScalewayServerDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + def, err := scaleway.GetServer(d.Id()) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err + } + + err = deleteServerSafe(scaleway, def.Identifier) + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/scaleway/resource_scaleway_server_test.go b/builtin/providers/scaleway/resource_scaleway_server_test.go new file mode 100644 index 000000000..b8fa3ff48 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_server_test.go @@ -0,0 +1,113 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccScalewayServer_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayServerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewayServerConfig, + Check: resource.ComposeTestCheckFunc( + 
testAccCheckScalewayServerExists("scaleway_server.base"), + testAccCheckScalewayServerAttributes("scaleway_server.base"), + resource.TestCheckResourceAttr( + "scaleway_server.base", "type", "C1"), + resource.TestCheckResourceAttr( + "scaleway_server.base", "name", "test"), + ), + }, + }, + }) +} + +func testAccCheckScalewayServerDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + _, err := client.GetServer(rs.Primary.ID) + + if err == nil { + return fmt.Errorf("Server still exists") + } + } + + return nil +} + +func testAccCheckScalewayServerAttributes(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Unknown resource: %s", n) + } + + client := testAccProvider.Meta().(*Client).scaleway + server, err := client.GetServer(rs.Primary.ID) + + if err != nil { + return err + } + + if server.Name != "test" { + return fmt.Errorf("Server has wrong name") + } + if server.Image.Identifier != armImageIdentifier { + return fmt.Errorf("Wrong server image") + } + if server.CommercialType != "C1" { + return fmt.Errorf("Wrong server type") + } + + return nil + } +} + +func testAccCheckScalewayServerExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Server ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + server, err := client.GetServer(rs.Primary.ID) + + if err != nil { + return err + } + + if server.Identifier != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +var armImageIdentifier = "5faef9cd-ea9b-4a63-9171-9e26bec03dbc" + +var testAccCheckScalewayServerConfig = fmt.Sprintf(` +resource "scaleway_server" "base" { + name = "test" + # ubuntu 14.04 + image = "%s" + type = "C1" +}`, armImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_volume.go b/builtin/providers/scaleway/resource_scaleway_volume.go new file mode 100644 index 000000000..6090a52cc --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_volume.go @@ -0,0 +1,127 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +const gb uint64 = 1000 * 1000 * 1000 + +func resourceScalewayVolume() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewayVolumeCreate, + Read: resourceScalewayVolumeRead, + Update: resourceScalewayVolumeUpdate, + Delete: resourceScalewayVolumeDelete, + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "size_in_gb": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 1 || value > 150 { + errors = append(errors, fmt.Errorf("%q be more than 1 and less than 150", k)) + } + return + }, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "l_ssd" { + errors = append(errors, fmt.Errorf("%q must be l_ssd", k)) + } + return + }, + }, + "server": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + 
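// server is filled in by the API when the volume is attached to an instance
+ // (see resource_scaleway_volume_attachment.go); it cannot be set here.
+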
}, + } +} + +func resourceScalewayVolumeCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + size := uint64(d.Get("size_in_gb").(int)) * gb + req := api.ScalewayVolumeDefinition{ + Name: d.Get("name").(string), + Size: size, + Type: d.Get("type").(string), + Organization: scaleway.Organization, + } + volumeID, err := scaleway.PostVolume(req) + if err != nil { + return fmt.Errorf("Error Creating volume: %q", err) + } + d.SetId(volumeID) + return resourceScalewayVolumeRead(d, m) +} + +func resourceScalewayVolumeRead(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + volume, err := scaleway.GetVolume(d.Id()) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error reading volume: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + + return err + } + d.Set("name", volume.Name) + d.Set("size_in_gb", volume.Size/gb) + d.Set("type", volume.VolumeType) + d.Set("server", "") + if volume.Server != nil { + d.Set("server", volume.Server.Identifier) + } + return nil +} + +func resourceScalewayVolumeUpdate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + var req api.ScalewayVolumePutDefinition + if d.HasChange("name") { + req.Name = String(d.Get("name").(string)) + } + + if d.HasChange("size_in_gb") { + size := uint64(d.Get("size_in_gb").(int)) * gb + req.Size = &size + } + + scaleway.PutVolume(d.Id(), req) + return resourceScalewayVolumeRead(d, m) +} + +func resourceScalewayVolumeDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + err := scaleway.DeleteVolume(d.Id()) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err + } + d.SetId("") + return nil +} diff --git a/builtin/providers/scaleway/resource_scaleway_volume_attachment.go b/builtin/providers/scaleway/resource_scaleway_volume_attachment.go new file mode 100644 index 000000000..3b24172eb --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_volume_attachment.go @@ -0,0 +1,201 @@ +package scaleway + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/scaleway/scaleway-cli/pkg/api" +) + +func resourceScalewayVolumeAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceScalewayVolumeAttachmentCreate, + Read: resourceScalewayVolumeAttachmentRead, + Delete: resourceScalewayVolumeAttachmentDelete, + Schema: map[string]*schema.Schema{ + "server": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "volume": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceScalewayVolumeAttachmentCreate(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + var startServerAgain = false + server, err := scaleway.GetServer(d.Get("server").(string)) + if err != nil { + fmt.Printf("Failed getting server: %q", err) + return err + } + + // volumes can only be modified when the server is powered off + if server.State != "stopped" { + startServerAgain = true + + if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil { + return err + } + + if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil { + return err + } + } + + volumes := make(map[string]api.ScalewayVolume) + for i, volume := range server.Volumes { + volumes[i] 
= volume + } + + vol, err := scaleway.GetVolume(d.Get("volume").(string)) + if err != nil { + return err + } + volumes[fmt.Sprintf("%d", len(volumes)+1)] = *vol + + // the API request requires most volume attributes to be unset to succeed + for k, v := range volumes { + v.Size = 0 + v.CreationDate = "" + v.Organization = "" + v.ModificationDate = "" + v.VolumeType = "" + v.Server = nil + v.ExportURI = "" + + volumes[k] = v + } + + var req = api.ScalewayServerPatchDefinition{ + Volumes: &volumes, + } + if err := scaleway.PatchServer(d.Get("server").(string), req); err != nil { + return fmt.Errorf("Failed attaching volume to server: %q", err) + } + + if startServerAgain { + if err := scaleway.PostServerAction(d.Get("server").(string), "poweron"); err != nil { + return err + } + + if err := waitForServerState(scaleway, d.Get("server").(string), "running"); err != nil { + return err + } + } + + d.SetId(fmt.Sprintf("scaleway-server:%s/volume/%s", d.Get("server").(string), d.Get("volume").(string))) + + return resourceScalewayVolumeAttachmentRead(d, m) +} + +func resourceScalewayVolumeAttachmentRead(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + + server, err := scaleway.GetServer(d.Get("server").(string)) + if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error reading server: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err + } + + if _, err := scaleway.GetVolume(d.Get("volume").(string)); err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] Error reading volume: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err + } + + for _, volume := range server.Volumes { + if volume.Identifier == d.Get("volume").(string) { + return nil + } + } + + log.Printf("[DEBUG] Volume %q not attached to server %q\n", d.Get("volume").(string), d.Get("server").(string)) + d.SetId("") + return nil +} + +func resourceScalewayVolumeAttachmentDelete(d *schema.ResourceData, m interface{}) error { + scaleway := m.(*Client).scaleway + var startServerAgain = false + + server, err := scaleway.GetServer(d.Get("server").(string)) + if err != nil { + return err + } + + // volumes can only be modified when the server is powered off + if server.State != "stopped" { + startServerAgain = true + + if err := scaleway.PostServerAction(server.Identifier, "poweroff"); err != nil { + return err + } + + if err := waitForServerState(scaleway, server.Identifier, "stopped"); err != nil { + return err + } + } + + volumes := make(map[string]api.ScalewayVolume) + for _, volume := range server.Volumes { + if volume.Identifier != d.Get("volume").(string) { + volumes[fmt.Sprintf("%d", len(volumes))] = volume + } + } + + // the API request requires most volume attributes to be unset to succeed + for k, v := range volumes { + v.Size = 0 + v.CreationDate = "" + v.Organization = "" + v.ModificationDate = "" + v.VolumeType = "" + v.Server = nil + v.ExportURI = "" + + volumes[k] = v + } + + var req = api.ScalewayServerPatchDefinition{ + Volumes: &volumes, + } + if err := scaleway.PatchServer(d.Get("server").(string), req); err != nil { + return err + } + + if startServerAgain { + if err := scaleway.PostServerAction(d.Get("server").(string), "poweron"); err != nil { + return err + } + + if err := waitForServerState(scaleway, d.Get("server").(string), "running"); err != nil { + return err + } + } + + d.SetId("") + + return nil +} diff --git 
a/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go b/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go new file mode 100644 index 000000000..33d54f9f9 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_volume_attachment_test.go @@ -0,0 +1,93 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccScalewayVolumeAttachment_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayVolumeAttachmentDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewayVolumeAttachmentConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayVolumeAttachmentExists("scaleway_volume_attachment.test"), + ), + }, + }, + }) +} + +func testAccCheckScalewayVolumeAttachmentDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + s, err := client.GetServer(rs.Primary.Attributes["server"]) + if err != nil { + fmt.Printf("Failed getting server: %q", err) + return err + } + + for _, volume := range s.Volumes { + if volume.Identifier == rs.Primary.Attributes["volume"] { + return fmt.Errorf("Attachment still exists") + } + } + } + + return nil +} + +func testAccCheckScalewayVolumeAttachmentExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + client := testAccProvider.Meta().(*Client).scaleway + + rs, _ := s.RootModule().Resources[n] + + server, err := client.GetServer(rs.Primary.Attributes["server"]) + if err != nil { + fmt.Printf("Failed getting server: %q", err) + return err + } + + for _, volume := range server.Volumes { + if volume.Identifier == rs.Primary.Attributes["volume"] { + return nil + } + } + + return fmt.Errorf("Attachment does not exist") + } +} + +var x86_64ImageIdentifier = "aecaed73-51a5-4439-a127-6d8229847145" + +var testAccCheckScalewayVolumeAttachmentConfig = fmt.Sprintf(` +resource "scaleway_server" "base" { + name = "test" + # ubuntu 14.04 + image = "%s" + type = "C2S" + # state = "stopped" +} + +resource "scaleway_volume" "test" { + name = "test" + size_in_gb = 20 + type = "l_ssd" +} + +resource "scaleway_volume_attachment" "test" { + server = "${scaleway_server.base.id}" + volume = "${scaleway_volume.test.id}" +}`, x86_64ImageIdentifier) diff --git a/builtin/providers/scaleway/resource_scaleway_volume_test.go b/builtin/providers/scaleway/resource_scaleway_volume_test.go new file mode 100644 index 000000000..fa3feb062 --- /dev/null +++ b/builtin/providers/scaleway/resource_scaleway_volume_test.go @@ -0,0 +1,107 @@ +package scaleway + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccScalewayVolume_Basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayVolumeDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewayVolumeConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayVolumeExists("scaleway_volume.test"), + testAccCheckScalewayVolumeAttributes("scaleway_volume.test"), + ), + }, + }, + }) +} + +func testAccCheckScalewayVolumeDestroy(s *terraform.State) error { 
+ client := testAccProvider.Meta().(*Client).scaleway + + for _, rs := range s.RootModule().Resources { + if rs.Type != "scaleway" { + continue + } + + _, err := client.GetVolume(rs.Primary.ID) + + if err == nil { + return fmt.Errorf("Volume still exists") + } + } + + return nil +} + +func testAccCheckScalewayVolumeAttributes(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Unknown resource: %s", n) + } + + client := testAccProvider.Meta().(*Client).scaleway + volume, err := client.GetVolume(rs.Primary.ID) + + if err != nil { + return err + } + + if volume.Name != "test" { + return fmt.Errorf("volume has wrong name: %q", volume.Name) + } + if volume.Size != 2000000000 { + return fmt.Errorf("volume has wrong size: %d", volume.Size) + } + if volume.VolumeType != "l_ssd" { + return fmt.Errorf("volume has volume type: %q", volume.VolumeType) + } + + return nil + } +} + +func testAccCheckScalewayVolumeExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Volume ID is set") + } + + client := testAccProvider.Meta().(*Client).scaleway + volume, err := client.GetVolume(rs.Primary.ID) + + if err != nil { + return err + } + + if volume.Identifier != rs.Primary.ID { + return fmt.Errorf("Record not found") + } + + return nil + } +} + +var testAccCheckScalewayVolumeConfig = ` +resource "scaleway_volume" "test" { + name = "test" + size_in_gb = 2 + type = "l_ssd" +} +` diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index 4ce91b409..5bb36224a 100644 --- a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -39,6 +39,7 @@ import ( powerdnsprovider "github.com/hashicorp/terraform/builtin/providers/powerdns" randomprovider "github.com/hashicorp/terraform/builtin/providers/random" rundeckprovider "github.com/hashicorp/terraform/builtin/providers/rundeck" + scalewayprovider "github.com/hashicorp/terraform/builtin/providers/scaleway" softlayerprovider "github.com/hashicorp/terraform/builtin/providers/softlayer" statuscakeprovider "github.com/hashicorp/terraform/builtin/providers/statuscake" templateprovider "github.com/hashicorp/terraform/builtin/providers/template" @@ -92,6 +93,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{ "powerdns": powerdnsprovider.Provider, "random": randomprovider.Provider, "rundeck": rundeckprovider.Provider, + "scaleway": scalewayprovider.Provider, "softlayer": softlayerprovider.Provider, "statuscake": statuscakeprovider.Provider, "template": templateprovider.Provider, diff --git a/vendor/github.com/moul/anonuuid/LICENSE b/vendor/github.com/moul/anonuuid/LICENSE new file mode 100644 index 000000000..492e2c629 --- /dev/null +++ b/vendor/github.com/moul/anonuuid/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Manfred Touron + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be 
included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/github.com/moul/anonuuid/README.md b/vendor/github.com/moul/anonuuid/README.md new file mode 100644 index 000000000..f5c9a57eb --- /dev/null +++ b/vendor/github.com/moul/anonuuid/README.md @@ -0,0 +1,170 @@ +# AnonUUID + +[![Build Status](https://travis-ci.org/moul/anonuuid.svg)](https://travis-ci.org/moul/anonuuid) +[![GoDoc](https://godoc.org/github.com/moul/anonuuid?status.svg)](https://godoc.org/github.com/moul/anonuuid) +[![Coverage Status](https://coveralls.io/repos/moul/anonuuid/badge.svg?branch=master&service=github)](https://coveralls.io/github/moul/anonuuid?branch=master) + +:wrench: Anonymize UUIDs outputs (written in Golang) + +![AnonUUID Logo](https://raw.githubusercontent.com/moul/anonuuid/master/assets/anonuuid.png) + +**anonuuid** anonymize an input string by replacing all UUIDs by an anonymized +new one. + +The fake UUIDs are cached, so if AnonUUID encounter the same real UUIDs multiple +times, the translation will be the same. + +## Usage + +```console +$ anonuuid --help +NAME: + anonuuid - Anonymize UUIDs outputs + +USAGE: + anonuuid [global options] command [command options] [arguments...] + +VERSION: + 1.0.0-dev + +AUTHOR(S): + Manfred Touron + +COMMANDS: + help, h Shows a list of commands or help for one command + +GLOBAL OPTIONS: + --hexspeak Generate hexspeak style fake UUIDs + --random, -r Generate random fake UUIDs + --keep-beginning Keep first part of the UUID unchanged + --keep-end Keep last part of the UUID unchanged + --prefix, -p Prefix generated UUIDs + --suffix Suffix generated UUIDs + --help, -h show help + --version, -v print the version + ``` + +## Example + +Replace all UUIDs and cache the correspondance. 
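
For readers who consume the library from Go rather than through the CLI, here is a minimal, hypothetical sketch built only on the generator helper defined in `anonuuid.go` later in this diff (`GenerateRandomUUID`); the import path is assumed to match the vendored location. The shell session below shows the same idea through the command-line tool.

```go
package main

import (
	"fmt"

	"github.com/moul/anonuuid"
)

func main() {
	// Produce a fake but well-formed UUID from 32 random hex characters.
	// These are the kind of values anonuuid substitutes for real UUIDs.
	fake, err := anonuuid.GenerateRandomUUID(32)
	if err != nil {
		panic(err)
	}
	fmt.Println(fake)
}
```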
+ +```command +$ anonuuid git:(master) ✗ cat < 32 { + part = part[:32] + } + uuid := part[:8] + "-" + part[8:12] + "-1" + part[13:16] + "-" + part[16:20] + "-" + part[20:32] + + err := IsUUID(uuid) + if err != nil { + return "", err + } + + return uuid, nil +} + +// GenerateRandomUUID returns an UUID based on random strings +func GenerateRandomUUID(length int) (string, error) { + var letters = []rune("abcdef0123456789") + + b := make([]rune, length) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return FormatUUID(string(b)) +} + +// GenerateHexspeakUUID returns an UUID formatted string containing hexspeak words +func GenerateHexspeakUUID(i int) (string, error) { + if i < 0 { + i = -i + } + hexspeaks := []string{ + "0ff1ce", + "31337", + "4b1d", + "badc0de", + "badcafe", + "badf00d", + "deadbabe", + "deadbeef", + "deadc0de", + "deadfeed", + "fee1bad", + } + return FormatUUID(hexspeaks[i%len(hexspeaks)]) +} + +// GenerateLenUUID returns an UUID formatted string based on an index number +func GenerateLenUUID(i int) (string, error) { + if i < 0 { + i = 2<<29 + i + } + return FormatUUID(fmt.Sprintf("%x", i)) +} diff --git a/vendor/github.com/renstrom/fuzzysearch/LICENSE b/vendor/github.com/renstrom/fuzzysearch/LICENSE new file mode 100644 index 000000000..9cc753370 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Renström + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go new file mode 100644 index 000000000..63277d51e --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/fuzzy.go @@ -0,0 +1,167 @@ +// Fuzzy searching allows for flexibly matching a string with partial input, +// useful for filtering data very quickly based on lightweight user input. +package fuzzy + +import ( + "unicode" + "unicode/utf8" +) + +var noop = func(r rune) rune { return r } + +// Match returns true if source matches target using a fuzzy-searching +// algorithm. Note that it doesn't implement Levenshtein distance (see +// RankMatch instead), but rather a simplified version where there's no +// approximation. The method will return true only if each character in the +// source can be found in the target and occurs after the preceding matches. +func Match(source, target string) bool { + return match(source, target, noop) +} + +// MatchFold is a case-insensitive version of Match. 
+func MatchFold(source, target string) bool { + return match(source, target, unicode.ToLower) +} + +func match(source, target string, fn func(rune) rune) bool { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return false + } + + if lenDiff == 0 && source == target { + return true + } + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } + } + return false + } + + return true +} + +// Find will return a list of strings in targets that fuzzy matches source. +func Find(source string, targets []string) []string { + return find(source, targets, noop) +} + +// FindFold is a case-insensitive version of Find. +func FindFold(source string, targets []string) []string { + return find(source, targets, unicode.ToLower) +} + +func find(source string, targets []string, fn func(rune) rune) []string { + var matches []string + + for _, target := range targets { + if match(source, target, fn) { + matches = append(matches, target) + } + } + + return matches +} + +// RankMatch is similar to Match except it will measure the Levenshtein +// distance between the source and the target and return its result. If there +// was no match, it will return -1. +// Given the requirements of match, RankMatch only needs to perform a subset of +// the Levenshtein calculation, only deletions need be considered, required +// additions and substitutions would fail the match test. +func RankMatch(source, target string) int { + return rank(source, target, noop) +} + +// RankMatchFold is a case-insensitive version of RankMatch. +func RankMatchFold(source, target string) int { + return rank(source, target, unicode.ToLower) +} + +func rank(source, target string, fn func(rune) rune) int { + lenDiff := len(target) - len(source) + + if lenDiff < 0 { + return -1 + } + + if lenDiff == 0 && source == target { + return 0 + } + + runeDiff := 0 + +Outer: + for _, r1 := range source { + for i, r2 := range target { + if fn(r1) == fn(r2) { + target = target[i+utf8.RuneLen(r2):] + continue Outer + } else { + runeDiff++ + } + } + return -1 + } + + // Count up remaining char + for len(target) > 0 { + target = target[utf8.RuneLen(rune(target[0])):] + runeDiff++ + } + + return runeDiff +} + +// RankFind is similar to Find, except it will also rank all matches using +// Levenshtein distance. +func RankFind(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, noop) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +// RankFindFold is a case-insensitive version of RankFind. +func RankFindFold(source string, targets []string) Ranks { + var r Ranks + for _, target := range find(source, targets, unicode.ToLower) { + distance := LevenshteinDistance(source, target) + r = append(r, Rank{source, target, distance}) + } + return r +} + +type Rank struct { + // Source is used as the source for matching. + Source string + + // Target is the word matched against. + Target string + + // Distance is the Levenshtein distance between Source and Target. 
+ Distance int +} + +type Ranks []Rank + +func (r Ranks) Len() int { + return len(r) +} + +func (r Ranks) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r Ranks) Less(i, j int) bool { + return r[i].Distance < r[j].Distance +} diff --git a/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go new file mode 100644 index 000000000..237923d34 --- /dev/null +++ b/vendor/github.com/renstrom/fuzzysearch/fuzzy/levenshtein.go @@ -0,0 +1,43 @@ +package fuzzy + +// LevenshteinDistance measures the difference between two strings. +// The Levenshtein distance between two words is the minimum number of +// single-character edits (i.e. insertions, deletions or substitutions) +// required to change one word into the other. +// +// This implemention is optimized to use O(min(m,n)) space and is based on the +// optimized C version found here: +// http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/Levenshtein_distance#C +func LevenshteinDistance(s, t string) int { + r1, r2 := []rune(s), []rune(t) + column := make([]int, len(r1)+1) + + for y := 1; y <= len(r1); y++ { + column[y] = y + } + + for x := 1; x <= len(r2); x++ { + column[0] = x + + for y, lastDiag := 1, x-1; y <= len(r1); y++ { + oldDiag := column[y] + cost := 0 + if r1[y-1] != r2[x-1] { + cost = 1 + } + column[y] = min(column[y]+1, column[y-1]+1, lastDiag+cost) + lastDiag = oldDiag + } + } + + return column[len(r1)] +} + +func min(a, b, c int) int { + if a < b && a < c { + return a + } else if b < c { + return b + } + return c +} diff --git a/vendor/github.com/scaleway/scaleway-cli/LICENSE.md b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md new file mode 100644 index 000000000..7503a16ca --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/LICENSE.md @@ -0,0 +1,22 @@ +The MIT License +=============== + +Copyright (c) **2014-2016 Scaleway ([@scaleway](https://twitter.com/scaleway))** + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
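
As an aside (not part of the vendored sources), here is a minimal sketch of how the fuzzy-matching and Levenshtein helpers above behave, using only the functions shown (`fuzzy.Match`, `fuzzy.RankMatch`, `fuzzy.Find`, `fuzzy.LevenshteinDistance`) and the vendored import path; the expected outputs in the comments follow from the implementations in `fuzzy.go` and `levenshtein.go`.

```go
package main

import (
	"fmt"

	"github.com/renstrom/fuzzysearch/fuzzy"
)

func main() {
	// Match: each rune of the needle must occur, in order, in the target.
	fmt.Println(fuzzy.Match("svr", "server")) // true
	fmt.Println(fuzzy.Match("xyz", "server")) // false

	// RankMatch: number of deletions needed to reduce the target to the
	// needle, or -1 when there is no match.
	fmt.Println(fuzzy.RankMatch("svr", "server")) // 3

	// Find: filter a candidate list down to fuzzy matches.
	fmt.Println(fuzzy.Find("vol", []string{"volumes", "servers", "snapshots"})) // [volumes]

	// LevenshteinDistance: minimum single-character edits between two words.
	fmt.Println(fuzzy.LevenshteinDistance("kitten", "sitting")) // 3
}
```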
diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md new file mode 100644 index 000000000..559a7018d --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/README.md @@ -0,0 +1,25 @@ +# Scaleway's API + +[![GoDoc](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api?status.svg)](https://godoc.org/github.com/scaleway/scaleway-cli/pkg/api) + +This package contains facilities to play with the Scaleway API, it includes the following features: + +- dedicated configuration file containing credentials to deal with the API +- caching to resolve UUIDs without contacting the API + +## Links + +- [API documentation](https://developer.scaleway.com) +- [Official Python SDK](https://github.com/scaleway/python-scaleway) +- Projects using this SDK + - https://github.com/scaleway/devhub + - https://github.com/scaleway/docker-machine-driver-scaleway + - https://github.com/scaleway-community/scaleway-ubuntu-coreos/blob/master/overlay/usr/local/update-firewall/scw-api/cache.go + - https://github.com/pulcy/quark + - https://github.com/hex-sh/terraform-provider-scaleway + - https://github.com/tscolari/bosh-scaleway-cpi +- Other **golang** clients + - https://github.com/lalyos/onlabs + - https://github.com/meatballhat/packer-builder-onlinelabs + - https://github.com/nlamirault/go-scaleway + - https://github.com/golang/build/blob/master/cmd/scaleway/scaleway.go diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go new file mode 100644 index 000000000..cd3a81e28 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go @@ -0,0 +1,2667 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +// Interact with Scaleway API + +// Package api contains client and functions to interact with Scaleway API +package api + +import ( + "bytes" + "crypto/tls" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "sort" + "strings" + "text/tabwriter" + "text/template" + "time" +) + +// Default values +var ( + ComputeAPI = "https://api.scaleway.com/" + AccountAPI = "https://account.scaleway.com/" + MetadataAPI = "http://169.254.42.42/" + MarketplaceAPI = "https://api-marketplace.scaleway.com" +) + +func init() { + if url := os.Getenv("SCW_COMPUTE_API"); url != "" { + ComputeAPI = url + } + if url := os.Getenv("SCW_ACCOUNT_API"); url != "" { + AccountAPI = url + } + if url := os.Getenv("SCW_METADATA_API"); url != "" { + MetadataAPI = url + } + if url := os.Getenv("SCW_MARKETPLACE_API"); url != "" { + MarketplaceAPI = url + } +} + +// ScalewayAPI is the interface used to communicate with the Scaleway API +type ScalewayAPI struct { + // Organization is the identifier of the Scaleway organization + Organization string + + // Token is the authentication token for the Scaleway organization + Token string + + // Password is the authentication password + password string + + userAgent string + + // Cache is used to quickly resolve identifiers from names + Cache *ScalewayCache + + client *http.Client + verbose bool + + // + Logger +} + +// ScalewayAPIError represents a Scaleway API Error +type ScalewayAPIError struct { + // Message is a human-friendly error message + APIMessage string `json:"message,omitempty"` + + // Type is a string code that defines the kind of error + Type string `json:"type,omitempty"` + + // Fields contains detail about validation error + Fields map[string][]string `json:"fields,omitempty"` + + // StatusCode is the HTTP status code received + StatusCode int `json:"-"` + + // Message + Message string `json:"-"` +} + +// Error returns a string representing the error +func (e ScalewayAPIError) Error() string { + var b bytes.Buffer + for k, v := range map[string]interface{}{ + "StatusCode": e.StatusCode, + "Type": e.Type, + "Message": e.Message, + "APIMessage": e.APIMessage, + } { + fmt.Fprintf(&b, " %-30s %s", fmt.Sprintf("%s: ", k), v) + } + return b.String() +} + +// HideAPICredentials removes API credentials from a string +func (s *ScalewayAPI) HideAPICredentials(input string) string { + output := input + if s.Token != "" { + output = strings.Replace(output, s.Token, "00000000-0000-4000-8000-000000000000", -1) + } + if s.Organization != "" { + output = strings.Replace(output, s.Organization, "00000000-0000-5000-9000-000000000000", -1) + } + if s.password != "" { + output = strings.Replace(output, s.password, "XX-XX-XX-XX", -1) + } + return output +} + +// ScalewayIPAddress represents a Scaleway IP address +type ScalewayIPAddress struct { + // Identifier is a unique identifier for the IP address + Identifier string `json:"id,omitempty"` + + // IP is an IPv4 address + IP string `json:"address,omitempty"` + + // Dynamic is a flag that defines an IP that change on each reboot + Dynamic *bool `json:"dynamic,omitempty"` +} + +// ScalewayVolume represents a Scaleway Volume +type ScalewayVolume struct { + // Identifier is a unique identifier for the volume + Identifier string `json:"id,omitempty"` + + // Size is the allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // CreationDate is the creation date of the volume + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last 
modification of the volume + ModificationDate string `json:"modification_date,omitempty"` + + // Organization is the organization owning the volume + Organization string `json:"organization,omitempty"` + + // Name is the name of the volume + Name string `json:"name,omitempty"` + + // Server is the server using this image + Server *struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server,omitempty"` + + // VolumeType is a Scaleway identifier for the kind of volume (default: l_ssd) + VolumeType string `json:"volume_type,omitempty"` + + // ExportURI represents the url used by initrd/scripts to attach the volume + ExportURI string `json:"export_uri,omitempty"` +} + +// ScalewayOneVolume represents the response of a GET /volumes/UUID API call +type ScalewayOneVolume struct { + Volume ScalewayVolume `json:"volume,omitempty"` +} + +// ScalewayVolumes represents a group of Scaleway volumes +type ScalewayVolumes struct { + // Volumes holds scaleway volumes of the response + Volumes []ScalewayVolume `json:"volumes,omitempty"` +} + +// ScalewayVolumeDefinition represents a Scaleway volume definition +type ScalewayVolumeDefinition struct { + // Name is the user-defined name of the volume + Name string `json:"name"` + + // Image is the image used by the volume + Size uint64 `json:"size"` + + // Bootscript is the bootscript used by the volume + Type string `json:"volume_type"` + + // Organization is the owner of the volume + Organization string `json:"organization"` +} + +// ScalewayVolumePutDefinition represents a Scaleway volume with nullable fields (for PUT) +type ScalewayVolumePutDefinition struct { + Identifier *string `json:"id,omitempty"` + Size *uint64 `json:"size,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Organization *string `json:"organization,omitempty"` + Name *string `json:"name,omitempty"` + Server struct { + Identifier *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } `json:"server,omitempty"` + VolumeType *string `json:"volume_type,omitempty"` + ExportURI *string `json:"export_uri,omitempty"` +} + +// ScalewayImage represents a Scaleway Image +type ScalewayImage struct { + // Identifier is a unique identifier for the image + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the image + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the image + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the image + ModificationDate string `json:"modification_date,omitempty"` + + // RootVolume is the root volume bound to the image + RootVolume ScalewayVolume `json:"root_volume,omitempty"` + + // Public is true for public images and false for user images + Public bool `json:"public,omitempty"` + + // Bootscript is the bootscript bound to the image + DefaultBootscript *ScalewayBootscript `json:"default_bootscript,omitempty"` + + // Organization is the owner of the image + Organization string `json:"organization,omitempty"` + + // Arch is the architecture target of the image + Arch string `json:"arch,omitempty"` + + // FIXME: extra_volumes +} + +// ScalewayImageIdentifier represents a Scaleway Image Identifier +type ScalewayImageIdentifier struct { + Identifier string + Arch string + Region string + Owner string +} + +// ScalewayOneImage represents the response of a GET /images/UUID API call +type ScalewayOneImage 
struct { + Image ScalewayImage `json:"image,omitempty"` +} + +// ScalewayImages represents a group of Scaleway images +type ScalewayImages struct { + // Images holds scaleway images of the response + Images []ScalewayImage `json:"images,omitempty"` +} + +// ScalewaySnapshot represents a Scaleway Snapshot +type ScalewaySnapshot struct { + // Identifier is a unique identifier for the snapshot + Identifier string `json:"id,omitempty"` + + // Name is a user-defined name for the snapshot + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the snapshot + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the snapshot + ModificationDate string `json:"modification_date,omitempty"` + + // Size is the allocated size of the volume + Size uint64 `json:"size,omitempty"` + + // Organization is the owner of the snapshot + Organization string `json:"organization"` + + // State is the current state of the snapshot + State string `json:"state"` + + // VolumeType is the kind of volume behind the snapshot + VolumeType string `json:"volume_type"` + + // BaseVolume is the volume from which the snapshot inherits + BaseVolume ScalewayVolume `json:"base_volume,omitempty"` +} + +// ScalewayOneSnapshot represents the response of a GET /snapshots/UUID API call +type ScalewayOneSnapshot struct { + Snapshot ScalewaySnapshot `json:"snapshot,omitempty"` +} + +// ScalewaySnapshots represents a group of Scaleway snapshots +type ScalewaySnapshots struct { + // Snapshots holds scaleway snapshots of the response + Snapshots []ScalewaySnapshot `json:"snapshots,omitempty"` +} + +// ScalewayBootscript represents a Scaleway Bootscript +type ScalewayBootscript struct { + Bootcmdargs string `json:"bootcmdargs,omitempty"` + Dtb string `json:"dtb,omitempty"` + Initrd string `json:"initrd,omitempty"` + Kernel string `json:"kernel,omitempty"` + + // Arch is the architecture target of the bootscript + Arch string `json:"architecture,omitempty"` + + // Identifier is a unique identifier for the bootscript + Identifier string `json:"id,omitempty"` + + // Organization is the owner of the bootscript + Organization string `json:"organization,omitempty"` + + // Name is a user-defined name for the bootscript + Title string `json:"title,omitempty"` + + // Public is true for public bootscripts and false for user bootscripts + Public bool `json:"public,omitempty"` + + Default bool `json:"default,omitempty"` +} + +// ScalewayOneBootscript represents the response of a GET /bootscripts/UUID API call +type ScalewayOneBootscript struct { + Bootscript ScalewayBootscript `json:"bootscript,omitempty"` +} + +// ScalewayBootscripts represents a group of Scaleway bootscripts +type ScalewayBootscripts struct { + // Bootscripts holds Scaleway bootscripts of the response + Bootscripts []ScalewayBootscript `json:"bootscripts,omitempty"` +} + +// ScalewayTask represents a Scaleway Task +type ScalewayTask struct { + // Identifier is a unique identifier for the task + Identifier string `json:"id,omitempty"` + + // StartDate is the start date of the task + StartDate string `json:"started_at,omitempty"` + + // TerminationDate is the termination date of the task + TerminationDate string `json:"terminated_at,omitempty"` + + HrefFrom string `json:"href_from,omitempty"` + + Description string `json:"description,omitempty"` + + Status string `json:"status,omitempty"` + + Progress int `json:"progress,omitempty"` +} + +// ScalewayOneTask represents the response of a GET 
/tasks/UUID API call +type ScalewayOneTask struct { + Task ScalewayTask `json:"task,omitempty"` +} + +// ScalewayTasks represents a group of Scaleway tasks +type ScalewayTasks struct { + // Tasks holds scaleway tasks of the response + Tasks []ScalewayTask `json:"tasks,omitempty"` +} + +// ScalewaySecurityGroupRule definition +type ScalewaySecurityGroupRule struct { + Direction string `json:"direction"` + Protocol string `json:"protocol"` + IPRange string `json:"ip_range"` + DestPortFrom int `json:"dest_port_from,omitempty"` + Action string `json:"action"` + Postion int `json:"position"` + DestPortTo string `json:"dest_port_to"` + Editable bool `json:"editable"` + ID string `json:"id"` +} + +// ScalewayGetSecurityGroupRules represents the response of a GET /security_group/{groupID}/rules +type ScalewayGetSecurityGroupRules struct { + Rules []ScalewaySecurityGroupRule `json:"rules"` +} + +// ScalewayGetSecurityGroupRule represents the response of a GET /security_group/{groupID}/rules/{ruleID} +type ScalewayGetSecurityGroupRule struct { + Rules ScalewaySecurityGroupRule `json:"rule"` +} + +// ScalewayNewSecurityGroupRule definition POST/PUT request /security_group/{groupID} +type ScalewayNewSecurityGroupRule struct { + Action string `json:"action"` + Direction string `json:"direction"` + IPRange string `json:"ip_range"` + Protocol string `json:"protocol"` + DestPortFrom int `json:"dest_port_from,omitempty"` +} + +// ScalewaySecurityGroups definition +type ScalewaySecurityGroups struct { + Description string `json:"description"` + ID string `json:"id"` + Organization string `json:"organization"` + Name string `json:"name"` + Servers []ScalewaySecurityGroup `json:"servers"` + EnableDefaultSecurity bool `json:"enable_default_security"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayGetSecurityGroups represents the response of a GET /security_groups/ +type ScalewayGetSecurityGroups struct { + SecurityGroups []ScalewaySecurityGroups `json:"security_groups"` +} + +// ScalewayGetSecurityGroup represents the response of a GET /security_groups/{groupID} +type ScalewayGetSecurityGroup struct { + SecurityGroups ScalewaySecurityGroups `json:"security_group"` +} + +// ScalewayIPDefinition represents the IP's fields +type ScalewayIPDefinition struct { + Organization string `json:"organization"` + Reverse string `json:"reverse"` + ID string `json:"id"` + Server struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + } `json:"server,omitempty"` + Address string `json:"address"` +} + +// ScalewayGetIPS represents the response of a GET /ips/ +type ScalewayGetIPS struct { + IPS []ScalewayIPDefinition `json:"ips"` +} + +// ScalewayGetIP represents the response of a GET /ips/{id_ip} +type ScalewayGetIP struct { + IP ScalewayIPDefinition `json:"ip"` +} + +// ScalewaySecurityGroup represents a Scaleway security group +type ScalewaySecurityGroup struct { + // Identifier is a unique identifier for the security group + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the security group + Name string `json:"name,omitempty"` +} + +// ScalewayNewSecurityGroup definition POST/PUT request /security_groups +type ScalewayNewSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` +} + +// ScalewayServer represents a Scaleway server +type ScalewayServer struct { + // Arch is the architecture target of the server + Arch string `json:"arch,omitempty"` + + // 
Identifier is a unique identifier for the server + Identifier string `json:"id,omitempty"` + + // Name is the user-defined name of the server + Name string `json:"name,omitempty"` + + // CreationDate is the creation date of the server + CreationDate string `json:"creation_date,omitempty"` + + // ModificationDate is the date of the last modification of the server + ModificationDate string `json:"modification_date,omitempty"` + + // Image is the image used by the server + Image ScalewayImage `json:"image,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // PublicIP is the public IP address bound to the server + PublicAddress ScalewayIPAddress `json:"public_ip,omitempty"` + + // State is the current status of the server + State string `json:"state,omitempty"` + + // StateDetail is the detailed status of the server + StateDetail string `json:"state_detail,omitempty"` + + // PrivateIP represents the private IPV4 attached to the server (changes on each boot) + PrivateIP string `json:"private_ip,omitempty"` + + // Bootscript is the unique identifier of the selected bootscript + Bootscript *ScalewayBootscript `json:"bootscript,omitempty"` + + // Hostname represents the ServerName in a format compatible with unix's hostname + Hostname string `json:"hostname,omitempty"` + + // Tags represents user-defined tags + Tags []string `json:"tags,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]ScalewayVolume `json:"volumes,omitempty"` + + // SecurityGroup is the selected security group object + SecurityGroup ScalewaySecurityGroup `json:"security_group,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization,omitempty"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type,omitempty"` + + // Location of the server + Location struct { + Platform string `json:"platform_id,omitempty"` + Chassis string `json:"chassis_id,omitempty"` + Cluster string `json:"cluster_id,omitempty"` + Hypervisor string `json:"hypervisor_id,omitempty"` + Blade string `json:"blade_id,omitempty"` + Node string `json:"node_id,omitempty"` + } `json:"location,omitempty"` + + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` +} + +// ScalewayIPV6Definition represents a Scaleway ipv6 +type ScalewayIPV6Definition struct { + Netmask string `json:"netmask"` + Gateway string `json:"gateway"` + Address string `json:"address"` +} + +// ScalewayServerPatchDefinition represents a Scaleway server with nullable fields (for PATCH) +type ScalewayServerPatchDefinition struct { + Arch *string `json:"arch,omitempty"` + Name *string `json:"name,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Image *ScalewayImage `json:"image,omitempty"` + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + PublicAddress *ScalewayIPAddress `json:"public_ip,omitempty"` + State *string `json:"state,omitempty"` + StateDetail *string `json:"state_detail,omitempty"` + PrivateIP *string `json:"private_ip,omitempty"` + Bootscript *string `json:"bootscript,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Volumes *map[string]ScalewayVolume `json:"volumes,omitempty"` + SecurityGroup *ScalewaySecurityGroup `json:"security_group,omitempty"` + Organization 
*string `json:"organization,omitempty"` + Tags *[]string `json:"tags,omitempty"` + IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + EnableIPV6 *bool `json:"enable_ipv6,omitempty"` +} + +// ScalewayServerDefinition represents a Scaleway server with image definition +type ScalewayServerDefinition struct { + // Name is the user-defined name of the server + Name string `json:"name"` + + // Image is the image used by the server + Image *string `json:"image,omitempty"` + + // Volumes are the attached volumes + Volumes map[string]string `json:"volumes,omitempty"` + + // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + + // Bootscript is the bootscript used by the server + Bootscript *string `json:"bootscript"` + + // Tags are the metadata tags attached to the server + Tags []string `json:"tags,omitempty"` + + // Organization is the owner of the server + Organization string `json:"organization"` + + // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) + CommercialType string `json:"commercial_type"` + + PublicIP string `json:"public_ip,omitempty"` + + EnableIPV6 bool `json:"enable_ipv6,omitempty"` +} + +// ScalewayOneServer represents the response of a GET /servers/UUID API call +type ScalewayOneServer struct { + Server ScalewayServer `json:"server,omitempty"` +} + +// ScalewayServers represents a group of Scaleway servers +type ScalewayServers struct { + // Servers holds scaleway servers of the response + Servers []ScalewayServer `json:"servers,omitempty"` +} + +// ScalewayServerAction represents an action to perform on a Scaleway server +type ScalewayServerAction struct { + // Action is the name of the action to trigger + Action string `json:"action,omitempty"` +} + +// ScalewaySnapshotDefinition represents a Scaleway snapshot definition +type ScalewaySnapshotDefinition struct { + VolumeIDentifier string `json:"volume_id"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` +} + +// ScalewayImageDefinition represents a Scaleway image definition +type ScalewayImageDefinition struct { + SnapshotIDentifier string `json:"root_volume"` + Name string `json:"name,omitempty"` + Organization string `json:"organization"` + Arch string `json:"arch"` + DefaultBootscript *string `json:"default_bootscript,omitempty"` +} + +// ScalewayRoleDefinition represents a Scaleway Token UserId Role +type ScalewayRoleDefinition struct { + Organization ScalewayOrganizationDefinition `json:"organization,omitempty"` + Role string `json:"role,omitempty"` +} + +// ScalewayTokenDefinition represents a Scaleway Token +type ScalewayTokenDefinition struct { + UserID string `json:"user_id"` + Description string `json:"description,omitempty"` + Roles ScalewayRoleDefinition `json:"roles"` + Expires string `json:"expires"` + InheritsUsersPerms bool `json:"inherits_user_perms"` + ID string `json:"id"` +} + +// ScalewayTokensDefinition represents a Scaleway Tokens +type ScalewayTokensDefinition struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayContainerData represents a Scaleway container data (S3) +type ScalewayContainerData struct { + LastModified string `json:"last_modified"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainerDatas represents a list of Scaleway containers data (S3) +type ScalewayGetContainerDatas struct { + Container []ScalewayContainerData `json:"container"` +} + +// ScalewayContainer represents 
a Scaleway container (S3) +type ScalewayContainer struct { + ScalewayOrganizationDefinition `json:"organization"` + Name string `json:"name"` + Size string `json:"size"` +} + +// ScalewayGetContainers represents a list of Scaleway containers (S3) +type ScalewayGetContainers struct { + Containers []ScalewayContainer `json:"containers"` +} + +// ScalewayConnectResponse represents the answer from POST /tokens +type ScalewayConnectResponse struct { + Token ScalewayTokenDefinition `json:"token"` +} + +// ScalewayConnect represents the data to connect +type ScalewayConnect struct { + Email string `json:"email"` + Password string `json:"password"` + Description string `json:"description"` + Expires bool `json:"expires"` +} + +// ScalewayOrganizationDefinition represents a Scaleway Organization +type ScalewayOrganizationDefinition struct { + ID string `json:"id"` + Name string `json:"name"` + Users []ScalewayUserDefinition `json:"users"` +} + +// ScalewayOrganizationsDefinition represents a Scaleway Organizations +type ScalewayOrganizationsDefinition struct { + Organizations []ScalewayOrganizationDefinition `json:"organizations"` +} + +// ScalewayUserDefinition represents a Scaleway User +type ScalewayUserDefinition struct { + Email string `json:"email"` + Firstname string `json:"firstname"` + Fullname string `json:"fullname"` + ID string `json:"id"` + Lastname string `json:"lastname"` + Organizations []ScalewayOrganizationDefinition `json:"organizations"` + Roles []ScalewayRoleDefinition `json:"roles"` + SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayUsersDefinition represents the response of a GET /user +type ScalewayUsersDefinition struct { + User ScalewayUserDefinition `json:"user"` +} + +// ScalewayKeyDefinition represents a key +type ScalewayKeyDefinition struct { + Key string `json:"key"` + Fingerprint string `json:"fingerprint,omitempty"` +} + +// ScalewayUserPatchSSHKeyDefinition represents a User Patch +type ScalewayUserPatchSSHKeyDefinition struct { + SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` +} + +// ScalewayDashboardResp represents a dashboard received from the API +type ScalewayDashboardResp struct { + Dashboard ScalewayDashboard +} + +// ScalewayDashboard represents a dashboard +type ScalewayDashboard struct { + VolumesCount int `json:"volumes_count"` + RunningServersCount int `json:"running_servers_count"` + ImagesCount int `json:"images_count"` + SnapshotsCount int `json:"snapshots_count"` + ServersCount int `json:"servers_count"` + IPsCount int `json:"ips_count"` +} + +// ScalewayPermissions represents the response of GET /permissions +type ScalewayPermissions map[string]ScalewayPermCategory + +// ScalewayPermCategory represents ScalewayPermissions's fields +type ScalewayPermCategory map[string][]string + +// ScalewayPermissionDefinition represents the permissions +type ScalewayPermissionDefinition struct { + Permissions ScalewayPermissions `json:"permissions"` +} + +// ScalewayUserdatas represents the response of a GET /user_data +type ScalewayUserdatas struct { + UserData []string `json:"user_data"` +} + +// ScalewayQuota represents a map of quota (name, value) +type ScalewayQuota map[string]int + +// ScalewayGetQuotas represents the response of GET /organizations/{orga_id}/quotas +type ScalewayGetQuotas struct { + Quotas ScalewayQuota `json:"quotas"` +} + +// ScalewayUserdata represents []byte +type ScalewayUserdata []byte + +// FuncMap used for json inspection +var FuncMap = template.FuncMap{ + "json": func(v interface{}) 
string { + a, _ := json.Marshal(v) + return string(a) + }, +} + +// MarketLocalImageDefinition represents localImage of marketplace version +type MarketLocalImageDefinition struct { + Arch string `json:"arch"` + ID string `json:"id"` + Zone string `json:"zone"` +} + +// MarketLocalImages represents an array of local images +type MarketLocalImages struct { + LocalImages []MarketLocalImageDefinition `json:"local_images"` +} + +// MarketLocalImage represents local image +type MarketLocalImage struct { + LocalImages MarketLocalImageDefinition `json:"local_image"` +} + +// MarketVersionDefinition represents version of marketplace image +type MarketVersionDefinition struct { + CreationDate string `json:"creation_date"` + ID string `json:"id"` + Image struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"image"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + MarketLocalImages +} + +// MarketVersions represents an array of marketplace image versions +type MarketVersions struct { + Versions []MarketVersionDefinition `json:"versions"` +} + +// MarketVersion represents version of marketplace image +type MarketVersion struct { + Version MarketVersionDefinition `json:"version"` +} + +// MarketImage represents MarketPlace image +type MarketImage struct { + Categories []string `json:"categories"` + CreationDate string `json:"creation_date"` + CurrentPublicVersion string `json:"current_public_version"` + Description string `json:"description"` + ID string `json:"id"` + Logo string `json:"logo"` + ModificationDate string `json:"modification_date"` + Name string `json:"name"` + Organization struct { + ID string `json:"id"` + Name string `json:"name"` + } `json:"organization"` + Public bool `json:"-"` + MarketVersions +} + +// MarketImages represents MarketPlace images +type MarketImages struct { + Images []MarketImage `json:"images"` +} + +// NewScalewayAPI creates a ready-to-use ScalewayAPI client +func NewScalewayAPI(organization, token, userAgent string, options ...func(*ScalewayAPI)) (*ScalewayAPI, error) { + cache, err := NewScalewayCache() + if err != nil { + return nil, err + } + s := &ScalewayAPI{ + // exposed + Organization: organization, + Token: token, + Cache: cache, + Logger: NewDefaultLogger(), + verbose: os.Getenv("SCW_VERBOSE_API") != "", + password: "", + userAgent: userAgent, + + // internal + client: &http.Client{}, + } + for _, option := range options { + option(s) + } + + if os.Getenv("SCW_TLSVERIFY") == "0" { + s.client.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + } + } + + return s, nil +} + +// ClearCache clears the cache +func (s *ScalewayAPI) ClearCache() { + s.Cache.Clear() +} + +// Sync flushes out the cache to the disk +func (s *ScalewayAPI) Sync() { + s.Cache.Save() +} + +// GetResponse returns an http.Response object for the requested resource +func (s *ScalewayAPI) GetResponse(apiURL, resource string) (*http.Response, error) { + uri := fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource) + + req, err := http.NewRequest("GET", uri, nil) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + return s.client.Do(req) +} + +// PostResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + uri := fmt.Sprintf("%s/%s", 
strings.TrimRight(apiURL, "/"), resource) + payload := new(bytes.Buffer) + encoder := json.NewEncoder(payload) + if err := encoder.Encode(data); err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", uri, payload) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + return s.client.Do(req) +} + +// PatchResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + uri := fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource) + payload := new(bytes.Buffer) + encoder := json.NewEncoder(payload) + if err := encoder.Encode(data); err != nil { + return nil, err + } + + req, err := http.NewRequest("PATCH", uri, payload) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + return s.client.Do(req) +} + +// PutResponse returns an http.Response object for the updated resource +func (s *ScalewayAPI) PutResponse(apiURL, resource string, data interface{}) (*http.Response, error) { + uri := fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource) + payload := new(bytes.Buffer) + encoder := json.NewEncoder(payload) + if err := encoder.Encode(data); err != nil { + return nil, err + } + + req, err := http.NewRequest("PUT", uri, payload) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + return s.client.Do(req) +} + +// DeleteResponse returns an http.Response object for the deleted resource +func (s *ScalewayAPI) DeleteResponse(apiURL, resource string) (*http.Response, error) { + uri := fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource) + + req, err := http.NewRequest("DELETE", uri, nil) + if err != nil { + return nil, err + } + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "application/json") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + return s.client.Do(req) +} + +// handleHTTPError checks the statusCode and displays the error +func (s *ScalewayAPI) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) { + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + if resp.StatusCode >= 500 { + return nil, errors.New(string(body)) + } + good := false + for _, code := range goodStatusCode { + if code == resp.StatusCode { + good = true + } + } + if !good { + var scwError ScalewayAPIError + + if err := json.Unmarshal(body, &scwError); err != nil { + return nil, err + } + scwError.StatusCode = resp.StatusCode + s.Debugf("%s", scwError.Error()) + return nil, scwError + } + if s.verbose { + var js bytes.Buffer + + err = json.Indent(&js, body, "", " ") + if err != nil { + s.Debugf("%s", string(body)) + } else { + s.Debugf("%s", js.String()) + } + } + return body, nil +} + +// GetServers gets the list of servers from the ScalewayAPI +func (s *ScalewayAPI) GetServers(all bool, limit int) (*[]ScalewayServer, error) { + query := url.Values{} + if !all { + query.Set("state", "running") + } + if limit > 0 { + // FIXME: wait for the API to be ready + // query.Set("per_page", strconv.Itoa(limit)) + } + if all && limit == 0 { + 
s.Cache.ClearServers() + } + resp, err := s.GetResponse(ComputeAPI, "servers?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var servers ScalewayServers + if err = json.Unmarshal(body, &servers); err != nil { + return nil, err + } + for _, server := range servers.Servers { + // FIXME region, arch, owner, title + s.Cache.InsertServer(server.Identifier, "fr-1", server.Arch, server.Organization, server.Name) + } + // FIXME: when API limit is ready, remove the following code + if limit > 0 && limit < len(servers.Servers) { + servers.Servers = servers.Servers[0:limit] + } + return &servers.Servers, nil +} + +// GetServer gets a server from the ScalewayAPI +func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { + resp, err := s.GetResponse(ComputeAPI, "servers/"+serverID) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + + var oneServer ScalewayOneServer + + if err = json.Unmarshal(body, &oneServer); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertServer(oneServer.Server.Identifier, "fr-1", oneServer.Server.Arch, oneServer.Server.Organization, oneServer.Server.Name) + return &oneServer.Server, nil +} + +// PostServerAction posts an action on a server +func (s *ScalewayAPI) PostServerAction(serverID, action string) error { + data := ScalewayServerAction{ + Action: action, + } + resp, err := s.PostResponse(ComputeAPI, fmt.Sprintf("servers/%s/action", serverID), data) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{202}, resp) + return err +} + +// DeleteServer deletes a server +func (s *ScalewayAPI) DeleteServer(serverID string) error { + defer s.Cache.RemoveServer(serverID) + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("servers/%s", serverID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err = s.handleHTTPError([]int{204}, resp); err != nil { + return err + } + return nil +} + +// PostServer creates a new server +func (s *ScalewayAPI) PostServer(definition ScalewayServerDefinition) (string, error) { + definition.Organization = s.Organization + + resp, err := s.PostResponse(ComputeAPI, "servers", definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return "", err + } + + body, err := s.handleHTTPError([]int{201}, resp) + if err != nil { + return "", err + } + var server ScalewayOneServer + + if err = json.Unmarshal(body, &server); err != nil { + return "", err + } + // FIXME region, arch, owner, title + s.Cache.InsertServer(server.Server.Identifier, "fr-1", server.Server.Arch, server.Server.Organization, server.Server.Name) + return server.Server.Identifier, nil +} + +// PatchUserSSHKey updates a user +func (s *ScalewayAPI) PatchUserSSHKey(UserID string, definition ScalewayUserPatchSSHKeyDefinition) error { + resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + if _, err := s.handleHTTPError([]int{200}, resp); err != nil { + return err + } + return nil +} + +// PatchServer updates a server +func (s *ScalewayAPI) PatchServer(serverID string, definition 
ScalewayServerPatchDefinition) error { + resp, err := s.PatchResponse(ComputeAPI, fmt.Sprintf("servers/%s", serverID), definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{200}, resp); err != nil { + return err + } + return nil +} + +// PostSnapshot creates a new snapshot +func (s *ScalewayAPI) PostSnapshot(volumeID string, name string) (string, error) { + definition := ScalewaySnapshotDefinition{ + VolumeIDentifier: volumeID, + Name: name, + Organization: s.Organization, + } + resp, err := s.PostResponse(ComputeAPI, "snapshots", definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return "", err + } + + body, err := s.handleHTTPError([]int{201}, resp) + if err != nil { + return "", err + } + var snapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &snapshot); err != nil { + return "", err + } + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(snapshot.Snapshot.Identifier, "fr-1", "", snapshot.Snapshot.Organization, snapshot.Snapshot.Name) + return snapshot.Snapshot.Identifier, nil +} + +// PostImage creates a new image +func (s *ScalewayAPI) PostImage(volumeID string, name string, bootscript string, arch string) (string, error) { + definition := ScalewayImageDefinition{ + SnapshotIDentifier: volumeID, + Name: name, + Organization: s.Organization, + Arch: arch, + } + if bootscript != "" { + definition.DefaultBootscript = &bootscript + } + + resp, err := s.PostResponse(ComputeAPI, "images", definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return "", err + } + + body, err := s.handleHTTPError([]int{201}, resp) + if err != nil { + return "", err + } + var image ScalewayOneImage + + if err = json.Unmarshal(body, &image); err != nil { + return "", err + } + // FIXME region, arch, owner, title + s.Cache.InsertImage(image.Image.Identifier, "fr-1", image.Image.Arch, image.Image.Organization, image.Image.Name, "") + return image.Image.Identifier, nil +} + +// PostVolume creates a new volume +func (s *ScalewayAPI) PostVolume(definition ScalewayVolumeDefinition) (string, error) { + definition.Organization = s.Organization + if definition.Type == "" { + definition.Type = "l_ssd" + } + + resp, err := s.PostResponse(ComputeAPI, "volumes", definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return "", err + } + + body, err := s.handleHTTPError([]int{201}, resp) + if err != nil { + return "", err + } + var volume ScalewayOneVolume + + if err = json.Unmarshal(body, &volume); err != nil { + return "", err + } + // FIXME: s.Cache.InsertVolume(volume.Volume.Identifier, volume.Volume.Name) + return volume.Volume.Identifier, nil +} + +// PutVolume updates a volume +func (s *ScalewayAPI) PutVolume(volumeID string, definition ScalewayVolumePutDefinition) error { + resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("volumes/%s", volumeID), definition) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// ResolveServer attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveServer(needle string) (ScalewayResolverResults, error) { + servers := s.Cache.LookUpServers(needle, true) + if len(servers) == 0 { + if _, err := s.GetServers(true, 0); err != nil { + return nil, err + } + servers = s.Cache.LookUpServers(needle, true) + } + return servers, nil +} + +// ResolveVolume attempts to find a 
matching Identifier for the input string +func (s *ScalewayAPI) ResolveVolume(needle string) (ScalewayResolverResults, error) { + volumes := s.Cache.LookUpVolumes(needle, true) + if len(volumes) == 0 { + if _, err := s.GetVolumes(); err != nil { + return nil, err + } + volumes = s.Cache.LookUpVolumes(needle, true) + } + return volumes, nil +} + +// ResolveSnapshot attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveSnapshot(needle string) (ScalewayResolverResults, error) { + snapshots := s.Cache.LookUpSnapshots(needle, true) + if len(snapshots) == 0 { + if _, err := s.GetSnapshots(); err != nil { + return nil, err + } + snapshots = s.Cache.LookUpSnapshots(needle, true) + } + return snapshots, nil +} + +// ResolveImage attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveImage(needle string) (ScalewayResolverResults, error) { + images := s.Cache.LookUpImages(needle, true) + if len(images) == 0 { + if _, err := s.GetImages(); err != nil { + return nil, err + } + images = s.Cache.LookUpImages(needle, true) + } + return images, nil +} + +// ResolveBootscript attempts to find a matching Identifier for the input string +func (s *ScalewayAPI) ResolveBootscript(needle string) (ScalewayResolverResults, error) { + bootscripts := s.Cache.LookUpBootscripts(needle, true) + if len(bootscripts) == 0 { + if _, err := s.GetBootscripts(); err != nil { + return nil, err + } + bootscripts = s.Cache.LookUpBootscripts(needle, true) + } + return bootscripts, nil +} + +// GetImages gets the list of images from the ScalewayAPI +func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { + images, err := s.GetMarketPlaceImages("") + if err != nil { + return nil, err + } + s.Cache.ClearImages() + for i, image := range images.Images { + if image.CurrentPublicVersion != "" { + for _, version := range image.Versions { + if version.ID == image.CurrentPublicVersion { + for _, localImage := range version.LocalImages { + images.Images[i].Public = true + s.Cache.InsertImage(localImage.ID, localImage.Zone, localImage.Arch, image.Organization.ID, image.Name, image.CurrentPublicVersion) + } + } + } + } + } + resp, err := s.GetResponse(ComputeAPI, "images?organization="+s.Organization) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var OrgaImages ScalewayImages + + if err = json.Unmarshal(body, &OrgaImages); err != nil { + return nil, err + } + for _, orgaImage := range OrgaImages.Images { + s.Cache.InsertImage(orgaImage.Identifier, "fr-1", orgaImage.Arch, orgaImage.Organization, orgaImage.Name, "") + images.Images = append(images.Images, MarketImage{ + Categories: []string{"MyImages"}, + CreationDate: orgaImage.CreationDate, + CurrentPublicVersion: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + Name: orgaImage.Name, + Public: false, + MarketVersions: MarketVersions{ + Versions: []MarketVersionDefinition{ + { + CreationDate: orgaImage.CreationDate, + ID: orgaImage.Identifier, + ModificationDate: orgaImage.ModificationDate, + MarketLocalImages: MarketLocalImages{ + LocalImages: []MarketLocalImageDefinition{ + { + Arch: orgaImage.Arch, + ID: orgaImage.Identifier, + Zone: "fr-1", + }, + }, + }, + }, + }, + }, + }) + } + return &images.Images, nil +} + +// GetImage gets an image from the ScalewayAPI +func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { + resp, err := 
s.GetResponse(ComputeAPI, "images/"+imageID) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var oneImage ScalewayOneImage + + if err = json.Unmarshal(body, &oneImage); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertImage(oneImage.Image.Identifier, "fr-1", oneImage.Image.Arch, oneImage.Image.Organization, oneImage.Image.Name, "") + return &oneImage.Image, nil +} + +// DeleteImage deletes a image +func (s *ScalewayAPI) DeleteImage(imageID string) error { + defer s.Cache.RemoveImage(imageID) + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("images/%s", imageID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{204}, resp); err != nil { + return err + } + return nil +} + +// DeleteSnapshot deletes a snapshot +func (s *ScalewayAPI) DeleteSnapshot(snapshotID string) error { + defer s.Cache.RemoveSnapshot(snapshotID) + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("snapshots/%s", snapshotID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{204}, resp); err != nil { + return err + } + return nil +} + +// DeleteVolume deletes a volume +func (s *ScalewayAPI) DeleteVolume(volumeID string) error { + defer s.Cache.RemoveVolume(volumeID) + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("volumes/%s", volumeID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{204}, resp); err != nil { + return err + } + return nil +} + +// GetSnapshots gets the list of snapshots from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshots() (*[]ScalewaySnapshot, error) { + query := url.Values{} + s.Cache.ClearSnapshots() + + resp, err := s.GetResponse(ComputeAPI, "snapshots?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var snapshots ScalewaySnapshots + + if err = json.Unmarshal(body, &snapshots); err != nil { + return nil, err + } + for _, snapshot := range snapshots.Snapshots { + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(snapshot.Identifier, "fr-1", "", snapshot.Organization, snapshot.Name) + } + return &snapshots.Snapshots, nil +} + +// GetSnapshot gets a snapshot from the ScalewayAPI +func (s *ScalewayAPI) GetSnapshot(snapshotID string) (*ScalewaySnapshot, error) { + resp, err := s.GetResponse(ComputeAPI, "snapshots/"+snapshotID) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var oneSnapshot ScalewayOneSnapshot + + if err = json.Unmarshal(body, &oneSnapshot); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertSnapshot(oneSnapshot.Snapshot.Identifier, "fr-1", "", oneSnapshot.Snapshot.Organization, oneSnapshot.Snapshot.Name) + return &oneSnapshot.Snapshot, nil +} + +// GetVolumes gets the list of volumes from the ScalewayAPI +func (s *ScalewayAPI) GetVolumes() (*[]ScalewayVolume, error) { + query := url.Values{} + s.Cache.ClearVolumes() + + resp, err := s.GetResponse(ComputeAPI, "volumes?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if 
err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var volumes ScalewayVolumes + + if err = json.Unmarshal(body, &volumes); err != nil { + return nil, err + } + for _, volume := range volumes.Volumes { + // FIXME region, arch, owner, title + s.Cache.InsertVolume(volume.Identifier, "fr-1", "", volume.Organization, volume.Name) + } + return &volumes.Volumes, nil +} + +// GetVolume gets a volume from the ScalewayAPI +func (s *ScalewayAPI) GetVolume(volumeID string) (*ScalewayVolume, error) { + resp, err := s.GetResponse(ComputeAPI, "volumes/"+volumeID) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var oneVolume ScalewayOneVolume + + if err = json.Unmarshal(body, &oneVolume); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertVolume(oneVolume.Volume.Identifier, "fr-1", "", oneVolume.Volume.Organization, oneVolume.Volume.Name) + return &oneVolume.Volume, nil +} + +// GetBootscripts gets the list of bootscripts from the ScalewayAPI +func (s *ScalewayAPI) GetBootscripts() (*[]ScalewayBootscript, error) { + query := url.Values{} + s.Cache.ClearBootscripts() + resp, err := s.GetResponse(ComputeAPI, "bootscripts?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var bootscripts ScalewayBootscripts + + if err = json.Unmarshal(body, &bootscripts); err != nil { + return nil, err + } + for _, bootscript := range bootscripts.Bootscripts { + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(bootscript.Identifier, "fr-1", bootscript.Arch, bootscript.Organization, bootscript.Title) + } + return &bootscripts.Bootscripts, nil +} + +// GetBootscript gets a bootscript from the ScalewayAPI +func (s *ScalewayAPI) GetBootscript(bootscriptID string) (*ScalewayBootscript, error) { + resp, err := s.GetResponse(ComputeAPI, "bootscripts/"+bootscriptID) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var oneBootscript ScalewayOneBootscript + + if err = json.Unmarshal(body, &oneBootscript); err != nil { + return nil, err + } + // FIXME region, arch, owner, title + s.Cache.InsertBootscript(oneBootscript.Bootscript.Identifier, "fr-1", oneBootscript.Bootscript.Arch, oneBootscript.Bootscript.Organization, oneBootscript.Bootscript.Title) + return &oneBootscript.Bootscript, nil +} + +// GetUserdatas gets list of userdata for a server +func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUserdatas, error) { + var url, endpoint string + + endpoint = ComputeAPI + if metadata { + url = "/user_data" + endpoint = MetadataAPI + } else { + url = fmt.Sprintf("servers/%s/user_data", serverID) + } + + resp, err := s.GetResponse(endpoint, url) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var userdatas ScalewayUserdatas + + if err = json.Unmarshal(body, &userdatas); err != nil { + return nil, err + } + return &userdatas, nil +} + +func (s *ScalewayUserdata) String() string { + return string(*s) +} + +// GetUserdata gets a specific 
userdata for a server +func (s *ScalewayAPI) GetUserdata(serverID, key string, metadata bool) (*ScalewayUserdata, error) { + var url, endpoint string + + endpoint = ComputeAPI + if metadata { + url = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + url = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + var err error + resp, err := s.GetResponse(endpoint, url) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + if resp.StatusCode != 200 { + return nil, fmt.Errorf("no such user_data %q (%d)", key, resp.StatusCode) + } + var data ScalewayUserdata + data, err = ioutil.ReadAll(resp.Body) + return &data, err +} + +// PatchUserdata sets a user data +func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata bool) error { + var resource, endpoint string + + endpoint = ComputeAPI + if metadata { + resource = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + resource = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + uri := fmt.Sprintf("%s/%s", strings.TrimRight(endpoint, "/"), resource) + payload := new(bytes.Buffer) + payload.Write(value) + + req, err := http.NewRequest("PATCH", uri, payload) + if err != nil { + return err + } + + req.Header.Set("X-Auth-Token", s.Token) + req.Header.Set("Content-Type", "text/plain") + req.Header.Set("User-Agent", s.userAgent) + + s.LogHTTP(req) + + resp, err := s.client.Do(req) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if resp.StatusCode == 204 { + return nil + } + + return fmt.Errorf("cannot set user_data (%d)", resp.StatusCode) +} + +// DeleteUserdata deletes a server user_data +func (s *ScalewayAPI) DeleteUserdata(serverID, key string, metadata bool) error { + var url, endpoint string + + endpoint = ComputeAPI + if metadata { + url = fmt.Sprintf("/user_data/%s", key) + endpoint = MetadataAPI + } else { + url = fmt.Sprintf("servers/%s/user_data/%s", serverID, key) + } + + resp, err := s.DeleteResponse(endpoint, url) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{204}, resp) + return err +} + +// GetTasks get the list of tasks from the ScalewayAPI +func (s *ScalewayAPI) GetTasks() (*[]ScalewayTask, error) { + query := url.Values{} + resp, err := s.GetResponse(ComputeAPI, "tasks?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var tasks ScalewayTasks + + if err = json.Unmarshal(body, &tasks); err != nil { + return nil, err + } + return &tasks.Tasks, nil +} + +// CheckCredentials performs a dummy check to ensure we can contact the API +func (s *ScalewayAPI) CheckCredentials() error { + query := url.Values{} + query.Set("token_id", s.Token) + + resp, err := s.GetResponse(AccountAPI, "tokens?"+query.Encode()) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{200}, resp); err != nil { + return err + } + return nil +} + +// GetUserID returns the userID +func (s *ScalewayAPI) GetUserID() (string, error) { + resp, err := s.GetResponse(AccountAPI, fmt.Sprintf("tokens/%s", s.Token)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return "", err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return "", err + } + var token ScalewayTokensDefinition + + 
if err = json.Unmarshal(body, &token); err != nil { + return "", err + } + return token.Token.UserID, nil +} + +// GetOrganization returns Organization +func (s *ScalewayAPI) GetOrganization() (*ScalewayOrganizationsDefinition, error) { + resp, err := s.GetResponse(AccountAPI, "organizations") + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var data ScalewayOrganizationsDefinition + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data, nil +} + +// GetUser returns the user +func (s *ScalewayAPI) GetUser() (*ScalewayUserDefinition, error) { + userID, err := s.GetUserID() + if err != nil { + return nil, err + } + resp, err := s.GetResponse(AccountAPI, fmt.Sprintf("users/%s", userID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var user ScalewayUsersDefinition + + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user.User, nil +} + +// GetPermissions returns the permissions +func (s *ScalewayAPI) GetPermissions() (*ScalewayPermissionDefinition, error) { + resp, err := s.GetResponse(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var permissions ScalewayPermissionDefinition + + if err = json.Unmarshal(body, &permissions); err != nil { + return nil, err + } + return &permissions, nil +} + +// GetDashboard returns the dashboard +func (s *ScalewayAPI) GetDashboard() (*ScalewayDashboard, error) { + resp, err := s.GetResponse(ComputeAPI, "dashboard") + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var dashboard ScalewayDashboardResp + + if err = json.Unmarshal(body, &dashboard); err != nil { + return nil, err + } + return &dashboard.Dashboard, nil +} + +// GetServerID returns exactly one server matching +func (s *ScalewayAPI) GetServerID(needle string) (string, error) { + // Parses optional type prefix, i.e: "server:name" -> "name" + _, needle = parseNeedle(needle) + + servers, err := s.ResolveServer(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve server %s: %s", needle, err) + } + if len(servers) == 1 { + return servers[0].Identifier, nil + } + if len(servers) == 0 { + return "", fmt.Errorf("No such server: %s", needle) + } + return "", showResolverResults(needle, servers) +} + +func showResolverResults(needle string, results ScalewayResolverResults) error { + w := tabwriter.NewWriter(os.Stderr, 20, 1, 3, ' ', 0) + defer w.Flush() + sort.Sort(results) + for _, result := range results { + if result.Arch == "" { + result.Arch = "n/a" + } + fmt.Fprintf(w, "- %s\t%s\t%s\t%s\n", result.TruncIdentifier(), result.CodeName(), result.Name, result.Arch) + } + return fmt.Errorf("Too many candidates for %s (%d)", needle, len(results)) +} + +// GetVolumeID returns exactly one volume matching +func (s *ScalewayAPI) GetVolumeID(needle string) (string, error) { + // Parses optional type prefix, i.e: "volume:name" -> "name" + _, needle = parseNeedle(needle) + + volumes, err := s.ResolveVolume(needle) + if err != nil { + return 
"", fmt.Errorf("Unable to resolve volume %s: %s", needle, err) + } + if len(volumes) == 1 { + return volumes[0].Identifier, nil + } + if len(volumes) == 0 { + return "", fmt.Errorf("No such volume: %s", needle) + } + return "", showResolverResults(needle, volumes) +} + +// GetSnapshotID returns exactly one snapshot matching +func (s *ScalewayAPI) GetSnapshotID(needle string) (string, error) { + // Parses optional type prefix, i.e: "snapshot:name" -> "name" + _, needle = parseNeedle(needle) + + snapshots, err := s.ResolveSnapshot(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve snapshot %s: %s", needle, err) + } + if len(snapshots) == 1 { + return snapshots[0].Identifier, nil + } + if len(snapshots) == 0 { + return "", fmt.Errorf("No such snapshot: %s", needle) + } + return "", showResolverResults(needle, snapshots) +} + +// FilterImagesByArch removes entry that doesn't match with architecture +func FilterImagesByArch(res ScalewayResolverResults, arch string) (ret ScalewayResolverResults) { + if arch == "*" { + return res + } + for _, result := range res { + if result.Arch == arch { + ret = append(ret, result) + } + } + return +} + +// GetImageID returns exactly one image matching +func (s *ScalewayAPI) GetImageID(needle, arch string) (*ScalewayImageIdentifier, error) { + // Parses optional type prefix, i.e: "image:name" -> "name" + _, needle = parseNeedle(needle) + + images, err := s.ResolveImage(needle) + if err != nil { + return nil, fmt.Errorf("Unable to resolve image %s: %s", needle, err) + } + images = FilterImagesByArch(images, arch) + if len(images) == 1 { + return &ScalewayImageIdentifier{ + Identifier: images[0].Identifier, + Arch: images[0].Arch, + // FIXME region, owner hardcoded + Region: "fr-1", + Owner: "", + }, nil + } + if len(images) == 0 { + return nil, fmt.Errorf("No such image: %s", needle) + } + return nil, showResolverResults(needle, images) +} + +// GetSecurityGroups returns a ScalewaySecurityGroups +func (s *ScalewayAPI) GetSecurityGroups() (*ScalewayGetSecurityGroups, error) { + resp, err := s.GetResponse(ComputeAPI, "security_groups") + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroups + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// GetSecurityGroupRules returns a ScalewaySecurityGroupRules +func (s *ScalewayAPI) GetSecurityGroupRules(groupID string) (*ScalewayGetSecurityGroupRules, error) { + resp, err := s.GetResponse(ComputeAPI, fmt.Sprintf("security_groups/%s/rules", groupID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var securityGroupRules ScalewayGetSecurityGroupRules + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroupRule returns a ScalewaySecurityGroupRule +func (s *ScalewayAPI) GetASecurityGroupRule(groupID string, rulesID string) (*ScalewayGetSecurityGroupRule, error) { + resp, err := s.GetResponse(ComputeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + 
} + var securityGroupRules ScalewayGetSecurityGroupRule + + if err = json.Unmarshal(body, &securityGroupRules); err != nil { + return nil, err + } + return &securityGroupRules, nil +} + +// GetASecurityGroup returns a ScalewaySecurityGroup +func (s *ScalewayAPI) GetASecurityGroup(groupsID string) (*ScalewayGetSecurityGroup, error) { + resp, err := s.GetResponse(ComputeAPI, fmt.Sprintf("security_groups/%s", groupsID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var securityGroups ScalewayGetSecurityGroup + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups, nil +} + +// PostSecurityGroup posts a group on a server +func (s *ScalewayAPI) PostSecurityGroup(group ScalewayNewSecurityGroup) error { + resp, err := s.PostResponse(ComputeAPI, "security_groups", group) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{201}, resp) + return err +} + +// PostSecurityGroupRule posts a rule on a server +func (s *ScalewayAPI) PostSecurityGroupRule(SecurityGroupID string, rules ScalewayNewSecurityGroupRule) error { + resp, err := s.PostResponse(ComputeAPI, fmt.Sprintf("security_groups/%s/rules", SecurityGroupID), rules) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{201}, resp) + return err +} + +// DeleteSecurityGroup deletes a SecurityGroup +func (s *ScalewayAPI) DeleteSecurityGroup(securityGroupID string) error { + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("security_groups/%s", securityGroupID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{204}, resp) + return err +} + +// PutSecurityGroup updates a SecurityGroup +func (s *ScalewayAPI) PutSecurityGroup(group ScalewayNewSecurityGroup, securityGroupID string) error { + resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// PutSecurityGroupRule updates a SecurityGroupRule +func (s *ScalewayAPI) PutSecurityGroupRule(rules ScalewayNewSecurityGroupRule, securityGroupID, RuleID string) error { + resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("security_groups/%s/rules/%s", securityGroupID, RuleID), rules) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// DeleteSecurityGroupRule deletes a SecurityGroupRule +func (s *ScalewayAPI) DeleteSecurityGroupRule(SecurityGroupID, RuleID string) error { + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("security_groups/%s/rules/%s", SecurityGroupID, RuleID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + _, err = s.handleHTTPError([]int{204}, resp) + return err +} + +// GetContainers returns a ScalewayGetContainers +func (s *ScalewayAPI) GetContainers() (*ScalewayGetContainers, error) { + resp, err := s.GetResponse(ComputeAPI, "containers") + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var containers 
ScalewayGetContainers + + if err = json.Unmarshal(body, &containers); err != nil { + return nil, err + } + return &containers, nil +} + +// GetContainerDatas returns a ScalewayGetContainerDatas +func (s *ScalewayAPI) GetContainerDatas(container string) (*ScalewayGetContainerDatas, error) { + resp, err := s.GetResponse(ComputeAPI, fmt.Sprintf("containers/%s", container)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var datas ScalewayGetContainerDatas + + if err = json.Unmarshal(body, &datas); err != nil { + return nil, err + } + return &datas, nil +} + +// GetIPS returns a ScalewayGetIPS +func (s *ScalewayAPI) GetIPS() (*ScalewayGetIPS, error) { + resp, err := s.GetResponse(ComputeAPI, "ips") + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ips ScalewayGetIPS + + if err = json.Unmarshal(body, &ips); err != nil { + return nil, err + } + return &ips, nil +} + +// NewIP returns a new IP +func (s *ScalewayAPI) NewIP() (*ScalewayGetIP, error) { + var orga struct { + Organization string `json:"organization"` + } + orga.Organization = s.Organization + resp, err := s.PostResponse(ComputeAPI, "ips", orga) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{201}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// AttachIP attachs an IP to a server +func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { + var update struct { + Address string `json:"address"` + ID string `json:"id"` + Reverse *string `json:"reverse"` + Organization string `json:"organization"` + Server string `json:"server"` + } + + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + update.Address = ip.IP.Address + update.ID = ip.IP.ID + update.Organization = ip.IP.Organization + update.Server = serverID + resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("ips/%s", ipID), update) + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// DeleteIP deletes an IP +func (s *ScalewayAPI) DeleteIP(ipID string) error { + resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("ips/%s", ipID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + + if _, err := s.handleHTTPError([]int{204}, resp); err != nil { + return err + } + return nil +} + +// GetIP returns a ScalewayGetIP +func (s *ScalewayAPI) GetIP(ipID string) (*ScalewayGetIP, error) { + resp, err := s.GetResponse(ComputeAPI, fmt.Sprintf("ips/%s", ipID)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ip ScalewayGetIP + + if err = json.Unmarshal(body, &ip); err != nil { + return nil, err + } + return &ip, nil +} + +// GetQuotas returns a ScalewayGetQuotas +func (s *ScalewayAPI) GetQuotas() (*ScalewayGetQuotas, error) { + resp, err := s.GetResponse(AccountAPI, fmt.Sprintf("organizations/%s/quotas", s.Organization)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err 
+ } + var quotas ScalewayGetQuotas + + if err = json.Unmarshal(body, &quotas); err != nil { + return nil, err + } + return &quotas, nil +} + +// GetBootscriptID returns exactly one bootscript matching +func (s *ScalewayAPI) GetBootscriptID(needle, arch string) (string, error) { + // Parses optional type prefix, i.e: "bootscript:name" -> "name" + if len(strings.Split(needle, ":")) == 1 { + return needle, nil + } + + _, needle = parseNeedle(needle) + + bootscripts, err := s.ResolveBootscript(needle) + if err != nil { + return "", fmt.Errorf("Unable to resolve bootscript %s: %s", needle, err) + } + bootscripts.FilterByArch(arch) + if len(bootscripts) == 1 { + return bootscripts[0].Identifier, nil + } + if len(bootscripts) == 0 { + return "", fmt.Errorf("No such bootscript: %s", needle) + } + return "", showResolverResults(needle, bootscripts) +} + +func rootNetDial(network, addr string) (net.Conn, error) { + dialer := net.Dialer{ + Timeout: 10 * time.Second, + KeepAlive: 10 * time.Second, + } + + // brute-force privileged ports + var localAddr net.Addr + var err error + for port := 1; port <= 1024; port++ { + localAddr, err = net.ResolveTCPAddr("tcp", fmt.Sprintf(":%d", port)) + + // this should never happen + if err != nil { + return nil, err + } + + dialer.LocalAddr = localAddr + + conn, err := dialer.Dial(network, addr) + + // if err is nil, dialer.Dial succeeded, so let's go + // else, err != nil, but we don't care + if err == nil { + return conn, nil + } + } + // if here, all privileged ports were tried without success + return nil, fmt.Errorf("bind: permission denied, are you root?") +} + +// SetPassword registers the password +func (s *ScalewayAPI) SetPassword(password string) { + s.password = password +} + +// GetMarketPlaceImages returns images from the marketplace +func (s *ScalewayAPI) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) { + resp, err := s.GetResponse(MarketplaceAPI, fmt.Sprintf("images/%s", uuidImage)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ret MarketImages + + if uuidImage != "" { + ret.Images = make([]MarketImage, 1) + + var img MarketImage + + if err = json.Unmarshal(body, &img); err != nil { + return nil, err + } + ret.Images[0] = img + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageVersions returns image versions +func (s *ScalewayAPI) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) { + resp, err := s.GetResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s", uuidImage, uuidVersion)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ret MarketVersions + + if uuidImage != "" { + var version MarketVersion + ret.Versions = make([]MarketVersionDefinition, 1) + + if err = json.Unmarshal(body, &version); err != nil { + return nil, err + } + ret.Versions[0] = version.Version + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// GetMarketPlaceImageCurrentVersion returns the image's current version +func (s *ScalewayAPI) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) { + resp, err := s.GetResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/current", uuidImage)) + if resp !=
nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ret MarketVersion + + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + return &ret, nil +} + +// GetMarketPlaceLocalImages returns images from local region +func (s *ScalewayAPI) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) { + resp, err := s.GetResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%s", uuidImage, uuidVersion, uuidLocalImage)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return nil, err + } + body, err := s.handleHTTPError([]int{200}, resp) + if err != nil { + return nil, err + } + var ret MarketLocalImages + if uuidLocalImage != "" { + var localImage MarketLocalImage + ret.LocalImages = make([]MarketLocalImageDefinition, 1) + + if err = json.Unmarshal(body, &localImage); err != nil { + return nil, err + } + ret.LocalImages[0] = localImage.LocalImages + } else { + if err = json.Unmarshal(body, &ret); err != nil { + return nil, err + } + } + return &ret, nil +} + +// PostMarketPlaceImage adds new image +func (s *ScalewayAPI) PostMarketPlaceImage(images MarketImage) error { + resp, err := s.PostResponse(MarketplaceAPI, "images/", images) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{202}, resp) + return err +} + +// PostMarketPlaceImageVersion adds new image version +func (s *ScalewayAPI) PostMarketPlaceImageVersion(uuidImage string, version MarketVersion) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions", uuidImage), version) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{202}, resp) + return err +} + +// PostMarketPlaceLocalImage adds new local image +func (s *ScalewayAPI) PostMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{202}, resp) + return err +} + +// PutMarketPlaceImage updates image +func (s *ScalewayAPI) PutMarketPlaceImage(uudiImage string, images MarketImage) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudiImage), images) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// PutMarketPlaceImageVersion updates image version +func (s *ScalewayAPI) PutMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error { + resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion), version) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// PutMarketPlaceLocalImage updates local image +func (s *ScalewayAPI) PutMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { + resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) + if resp != nil { + defer 
resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + +// DeleteMarketPlaceImage deletes image +func (s *ScalewayAPI) DeleteMarketPlaceImage(uudImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudImage)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{204}, resp) + return err +} + +// DeleteMarketPlaceImageVersion delete image version +func (s *ScalewayAPI) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{204}, resp) + return err +} + +// DeleteMarketPlaceLocalImage deletes local image +func (s *ScalewayAPI) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error { + resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage)) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{204}, resp) + return err +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api_test.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api_test.go new file mode 100644 index 000000000..de127c947 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api_test.go @@ -0,0 +1,21 @@ +package api + +import ( + "testing" + + "github.com/scaleway/scaleway-cli/pkg/scwversion" + . "github.com/smartystreets/goconvey/convey" +) + +func TestNewScalewayAPI(t *testing.T) { + Convey("Testing NewScalewayAPI()", t, func() { + api, err := NewScalewayAPI("my-organization", "my-token", scwversion.UserAgent()) + So(err, ShouldBeNil) + So(api, ShouldNotBeNil) + So(api.Token, ShouldEqual, "my-token") + So(api.Organization, ShouldEqual, "my-organization") + So(api.Cache, ShouldNotBeNil) + So(api.client, ShouldNotBeNil) + So(api.Logger, ShouldNotBeNil) + }) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go new file mode 100644 index 000000000..72d119059 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go @@ -0,0 +1,731 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
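// NOTE (editorial, illustrative only — not part of this patch): the cache defined
// below is normally used together with the resolver helpers in api.go. A minimal
// usage sketch, assuming only the exported functions of this file; the UUID, region,
// and names are placeholder values:
//
//	cache, err := NewScalewayCache()
//	if err != nil {
//		// handle the error
//	}
//	cache.InsertServer("11111111-2222-3333-4444-555555555555", "fr-1", "arm", "my-org", "web-1")
//	results := cache.LookUpServers("web", true)
//	_ = results
//	// Save only rewrites ~/.scw-cache.db when the cache was actually modified.
//	_ = cache.Save()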
+ +package api + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "regexp" + "strings" + "sync" + + "github.com/moul/anonuuid" + "github.com/renstrom/fuzzysearch/fuzzy" +) + +const ( + // CacheRegion permits to access at the region field + CacheRegion = iota + // CacheArch permits to access at the arch field + CacheArch + // CacheOwner permits to access at the owner field + CacheOwner + // CacheTitle permits to access at the title field + CacheTitle + // CacheMarketPlaceUUID is used to determine the UUID of local images + CacheMarketPlaceUUID + + // CacheMaxfield is used to determine the size of array + CacheMaxfield +) + +// ScalewayCache is used not to query the API to resolve full identifiers +type ScalewayCache struct { + // Images contains names of Scaleway images indexed by identifier + Images map[string][CacheMaxfield]string `json:"images"` + + // Snapshots contains names of Scaleway snapshots indexed by identifier + Snapshots map[string][CacheMaxfield]string `json:"snapshots"` + + // Volumes contains names of Scaleway volumes indexed by identifier + Volumes map[string][CacheMaxfield]string `json:"volumes"` + + // Bootscripts contains names of Scaleway bootscripts indexed by identifier + Bootscripts map[string][CacheMaxfield]string `json:"bootscripts"` + + // Servers contains names of Scaleway servers indexed by identifier + Servers map[string][CacheMaxfield]string `json:"servers"` + + // Path is the path to the cache file + Path string `json:"-"` + + // Modified tells if the cache needs to be overwritten or not + Modified bool `json:"-"` + + // Lock allows ScalewayCache to be used concurrently + Lock sync.Mutex `json:"-"` +} + +const ( + // IdentifierUnknown is used when we don't know explicitely the type key of the object (used for nil comparison) + IdentifierUnknown = 1 << iota + // IdentifierServer is the type key of cached server objects + IdentifierServer + // IdentifierImage is the type key of cached image objects + IdentifierImage + // IdentifierSnapshot is the type key of cached snapshot objects + IdentifierSnapshot + // IdentifierBootscript is the type key of cached bootscript objects + IdentifierBootscript + // IdentifierVolume is the type key of cached volume objects + IdentifierVolume +) + +// ScalewayResolverResult is a structure containing human-readable information +// about resolver results. This structure is used to display the user choices. 
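// For example (illustrative values only): a cached server named "My Web Server #1"
// is rendered by CodeName() as "server:my-web-server-1", and TruncIdentifier()
// shortens its UUID to the first 8 characters for display.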
+type ScalewayResolverResult struct { + Identifier string + Type int + Name string + Arch string + Needle string + RankMatch int +} + +// ScalewayResolverResults is a list of `ScalewayResolverResult` +type ScalewayResolverResults []ScalewayResolverResult + +// NewScalewayResolverResult returns a new ScalewayResolverResult +func NewScalewayResolverResult(Identifier, Name, Arch string, Type int) ScalewayResolverResult { + if err := anonuuid.IsUUID(Identifier); err != nil { + log.Fatal(err) + } + return ScalewayResolverResult{ + Identifier: Identifier, + Type: Type, + Name: Name, + Arch: Arch, + } +} + +func (s ScalewayResolverResults) Len() int { + return len(s) +} + +func (s ScalewayResolverResults) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ScalewayResolverResults) Less(i, j int) bool { + return s[i].RankMatch < s[j].RankMatch +} + +// TruncIdentifier returns first 8 characters of an Identifier (UUID) +func (s *ScalewayResolverResult) TruncIdentifier() string { + return s.Identifier[:8] +} + +func identifierTypeName(kind int) string { + switch kind { + case IdentifierServer: + return "Server" + case IdentifierImage: + return "Image" + case IdentifierSnapshot: + return "Snapshot" + case IdentifierVolume: + return "Volume" + case IdentifierBootscript: + return "Bootscript" + } + return "" +} + +// CodeName returns a full resource name with typed prefix +func (s *ScalewayResolverResult) CodeName() string { + name := strings.ToLower(s.Name) + name = regexp.MustCompile(`[^a-z0-9-]`).ReplaceAllString(name, "-") + name = regexp.MustCompile(`--+`).ReplaceAllString(name, "-") + name = strings.Trim(name, "-") + + return fmt.Sprintf("%s:%s", strings.ToLower(identifierTypeName(s.Type)), name) +} + +// FilterByArch deletes the elements which not match with arch +func (s *ScalewayResolverResults) FilterByArch(arch string) { +REDO: + for i := range *s { + if (*s)[i].Arch != arch { + (*s)[i] = (*s)[len(*s)-1] + *s = (*s)[:len(*s)-1] + goto REDO + } + } +} + +// NewScalewayCache loads a per-user cache +func NewScalewayCache() (*ScalewayCache, error) { + homeDir := os.Getenv("HOME") // *nix + if homeDir == "" { // Windows + homeDir = os.Getenv("USERPROFILE") + } + if homeDir == "" { + homeDir = "/tmp" + } + cachePath := filepath.Join(homeDir, ".scw-cache.db") + var cache ScalewayCache + cache.Path = cachePath + _, err := os.Stat(cachePath) + if os.IsNotExist(err) { + cache.Clear() + return &cache, nil + } else if err != nil { + return nil, err + } + file, err := ioutil.ReadFile(cachePath) + if err != nil { + return nil, err + } + err = json.Unmarshal(file, &cache) + if err != nil { + // fix compatibility with older version + if err = os.Remove(cachePath); err != nil { + return nil, err + } + cache.Clear() + return &cache, nil + } + if cache.Images == nil { + cache.Images = make(map[string][CacheMaxfield]string) + } + if cache.Snapshots == nil { + cache.Snapshots = make(map[string][CacheMaxfield]string) + } + if cache.Volumes == nil { + cache.Volumes = make(map[string][CacheMaxfield]string) + } + if cache.Servers == nil { + cache.Servers = make(map[string][CacheMaxfield]string) + } + if cache.Bootscripts == nil { + cache.Bootscripts = make(map[string][CacheMaxfield]string) + } + return &cache, nil +} + +// Clear removes all information from the cache +func (s *ScalewayCache) Clear() { + s.Images = make(map[string][CacheMaxfield]string) + s.Snapshots = make(map[string][CacheMaxfield]string) + s.Volumes = make(map[string][CacheMaxfield]string) + s.Bootscripts = 
make(map[string][CacheMaxfield]string) + s.Servers = make(map[string][CacheMaxfield]string) + s.Modified = true +} + +// Flush flushes the cache database +func (c *ScalewayCache) Flush() error { + return os.Remove(c.Path) +} + +// Save atomically overwrites the current cache database +func (c *ScalewayCache) Save() error { + c.Lock.Lock() + defer c.Lock.Unlock() + + log.Printf("Writing cache file to disk") + + if c.Modified { + file, err := ioutil.TempFile(filepath.Dir(c.Path), filepath.Base(c.Path)) + if err != nil { + return err + } + defer file.Close() + encoder := json.NewEncoder(file) + err = encoder.Encode(*c) + if err != nil { + os.Remove(file.Name()) + return err + } + + if err := os.Rename(file.Name(), c.Path); err != nil { + os.Remove(file.Name()) + return err + } + } + return nil +} + +// ComputeRankMatch fills `ScalewayResolverResult.RankMatch` with its `fuzzy` score +func (s *ScalewayResolverResult) ComputeRankMatch(needle string) { + s.Needle = needle + s.RankMatch = fuzzy.RankMatch(needle, s.Name) +} + +// LookUpImages attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) ScalewayResolverResults { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Images[needle]; ok { + entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + // FIXME: if 'user/' is in needle, only watch for a user image + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + var exactMatches ScalewayResolverResults + for identifier, fields := range c.Images { + if fields[CacheTitle] == needle { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } else if strings.HasPrefix(fields[CacheMarketPlaceUUID], needle) || nameRegex.MatchString(fields[CacheMarketPlaceUUID]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches + } + + return removeDuplicatesResults(res) +} + +// LookUpSnapshots attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) ScalewayResolverResults { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Snapshots[needle]; ok { + entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + var exactMatches ScalewayResolverResults + for identifier, fields := range c.Snapshots { + if fields[CacheTitle] == 
needle { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches + } + + return removeDuplicatesResults(res) +} + +// LookUpVolumes attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) ScalewayResolverResults { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Volumes[needle]; ok { + entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + var exactMatches ScalewayResolverResults + for identifier, fields := range c.Volumes { + if fields[CacheTitle] == needle { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches + } + + return removeDuplicatesResults(res) +} + +// LookUpBootscripts attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) ScalewayResolverResults { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Bootscripts[needle]; ok { + entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + var exactMatches ScalewayResolverResults + for identifier, fields := range c.Bootscripts { + if fields[CacheTitle] == needle { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches + } + + return removeDuplicatesResults(res) +} + +// LookUpServers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) ScalewayResolverResults { + c.Lock.Lock() + defer c.Lock.Unlock() + + var res ScalewayResolverResults + + if acceptUUID && anonuuid.IsUUID(needle) == nil { + if fields, ok := c.Servers[needle]; ok { + entry := 
NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) + var exactMatches ScalewayResolverResults + for identifier, fields := range c.Servers { + if fields[CacheTitle] == needle { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry.ComputeRankMatch(needle) + exactMatches = append(exactMatches, entry) + } + if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { + entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry.ComputeRankMatch(needle) + res = append(res, entry) + } + } + + if len(exactMatches) == 1 { + return exactMatches + } + + return removeDuplicatesResults(res) +} + +// removeDuplicatesResults transforms an array into a unique array +func removeDuplicatesResults(elements ScalewayResolverResults) ScalewayResolverResults { + encountered := map[string]ScalewayResolverResult{} + + // Create a map of all unique elements. + for v := range elements { + encountered[elements[v].Identifier] = elements[v] + } + + // Place all keys from the map into a slice. + results := ScalewayResolverResults{} + for _, result := range encountered { + results = append(results, result) + } + return results +} + +// parseNeedle parses a user needle and try to extract a forced object type +// i.e: +// - server:blah-blah -> kind=server, needle=blah-blah +// - blah-blah -> kind="", needle=blah-blah +// - not-existing-type:blah-blah +func parseNeedle(input string) (identifierType int, needle string) { + parts := strings.Split(input, ":") + if len(parts) == 2 { + switch parts[0] { + case "server": + return IdentifierServer, parts[1] + case "image": + return IdentifierImage, parts[1] + case "snapshot": + return IdentifierSnapshot, parts[1] + case "bootscript": + return IdentifierBootscript, parts[1] + case "volume": + return IdentifierVolume, parts[1] + } + } + return IdentifierUnknown, input +} + +// LookUpIdentifiers attempts to return identifiers matching a pattern +func (c *ScalewayCache) LookUpIdentifiers(needle string) ScalewayResolverResults { + results := ScalewayResolverResults{} + + identifierType, needle := parseNeedle(needle) + + if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { + for _, result := range c.LookUpServers(needle, false) { + entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierServer) + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { + for _, result := range c.LookUpImages(needle, false) { + entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierImage) + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { + for _, result := range c.LookUpSnapshots(needle, false) { + entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierSnapshot) + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { + for _, result := range c.LookUpVolumes(needle, false) { + entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierVolume) + 
entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { + for _, result := range c.LookUpBootscripts(needle, false) { + entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierBootscript) + entry.ComputeRankMatch(needle) + results = append(results, entry) + } + } + + return results +} + +// InsertServer registers a server in the cache +func (c *ScalewayCache) InsertServer(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Servers[identifier] + if !exists || fields[CacheTitle] != name { + c.Servers[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveServer removes a server from the cache +func (c *ScalewayCache) RemoveServer(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Servers, identifier) + c.Modified = true +} + +// ClearServers removes all servers from the cache +func (c *ScalewayCache) ClearServers() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertImage registers an image in the cache +func (c *ScalewayCache) InsertImage(identifier, region, arch, owner, name, marketPlaceUUID string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Images[identifier] + if !exists || fields[CacheTitle] != name { + c.Images[identifier] = [CacheMaxfield]string{region, arch, owner, name, marketPlaceUUID} + c.Modified = true + } +} + +// RemoveImage removes an image from the cache +func (c *ScalewayCache) RemoveImage(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Images, identifier) + c.Modified = true +} + +// ClearImages removes all images from the cache +func (c *ScalewayCache) ClearImages() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Images = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertSnapshot registers a snapshot in the cache +func (c *ScalewayCache) InsertSnapshot(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Snapshots[identifier] + if !exists || fields[CacheTitle] != name { + c.Snapshots[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveSnapshot removes a snapshot from the cache +func (c *ScalewayCache) RemoveSnapshot(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Snapshots, identifier) + c.Modified = true +} + +// ClearSnapshots removes all snapshots from the cache +func (c *ScalewayCache) ClearSnapshots() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Snapshots = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertVolume registers a volume in the cache +func (c *ScalewayCache) InsertVolume(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Volumes[identifier] + if !exists || fields[CacheTitle] != name { + c.Volumes[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveVolume removes a volume from the cache +func (c *ScalewayCache) RemoveVolume(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Volumes, identifier) + c.Modified = true +} + +// ClearVolumes removes all volumes from the cache +func (c *ScalewayCache) ClearVolumes() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Volumes =
make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// InsertBootscript registers an bootscript in the cache +func (c *ScalewayCache) InsertBootscript(identifier, region, arch, owner, name string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + fields, exists := c.Bootscripts[identifier] + if !exists || fields[CacheTitle] != name { + c.Bootscripts[identifier] = [CacheMaxfield]string{region, arch, owner, name} + c.Modified = true + } +} + +// RemoveBootscript removes a bootscript from the cache +func (c *ScalewayCache) RemoveBootscript(identifier string) { + c.Lock.Lock() + defer c.Lock.Unlock() + + delete(c.Bootscripts, identifier) + c.Modified = true +} + +// ClearBootscripts removes all bootscripts from the cache +func (c *ScalewayCache) ClearBootscripts() { + c.Lock.Lock() + defer c.Lock.Unlock() + + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Modified = true +} + +// GetNbServers returns the number of servers in the cache +func (c *ScalewayCache) GetNbServers() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Servers) +} + +// GetNbImages returns the number of images in the cache +func (c *ScalewayCache) GetNbImages() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Images) +} + +// GetNbSnapshots returns the number of snapshots in the cache +func (c *ScalewayCache) GetNbSnapshots() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Snapshots) +} + +// GetNbVolumes returns the number of volumes in the cache +func (c *ScalewayCache) GetNbVolumes() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Volumes) +} + +// GetNbBootscripts returns the number of bootscripts in the cache +func (c *ScalewayCache) GetNbBootscripts() int { + c.Lock.Lock() + defer c.Lock.Unlock() + + return len(c.Bootscripts) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go new file mode 100644 index 000000000..d14a59dcb --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go @@ -0,0 +1,49 @@ +// Copyright (C) 2015 Scaleway. All rights reserved. +// Use of this source code is governed by a MIT-style +// license that can be found in the LICENSE.md file. 
+ +package api + +import ( + "fmt" + "log" + "net/http" + "os" +) + +// Logger handles logging concerns for the Scaleway API SDK +type Logger interface { + LogHTTP(*http.Request) + Fatalf(format string, v ...interface{}) + Debugf(format string, v ...interface{}) + Infof(format string, v ...interface{}) + Warnf(format string, v ...interface{}) +} + +// NewDefaultLogger returns a logger which is configured for stdout +func NewDefaultLogger() Logger { + return &defaultLogger{ + Logger: log.New(os.Stdout, "", log.LstdFlags), + } +} + +type defaultLogger struct { + *log.Logger +} + +func (l *defaultLogger) LogHTTP(r *http.Request) { + l.Printf("%s %s\n", r.Method, r.URL.Path) +} +func (l *defaultLogger) Fatalf(format string, v ...interface{}) { + l.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) + os.Exit(1) +} +func (l *defaultLogger) Debugf(format string, v ...interface{}) { + l.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) +} +func (l *defaultLogger) Infof(format string, v ...interface{}) { + l.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) +} +func (l *defaultLogger) Warnf(format string, v ...interface{}) { + l.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version.go b/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version.go new file mode 100644 index 000000000..33e0f3719 --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version.go @@ -0,0 +1,16 @@ +package scwversion + +import "fmt" + +var ( + // VERSION represents the semver version of the package + VERSION = "v1.9.0+dev" + + // GITCOMMIT represents the git commit hash of the package, it is configured at build time + GITCOMMIT string +) + +// UserAgent returns a string to be used by API +func UserAgent() string { + return fmt.Sprintf("scw/%v", VERSION) +} diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version_test.go b/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version_test.go new file mode 100644 index 000000000..39582e39c --- /dev/null +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/scwversion/version_test.go @@ -0,0 +1,14 @@ +package scwversion + +import ( + "testing" + + . "github.com/smartystreets/goconvey/convey" +) + +func TestInit(t *testing.T) { + Convey("Testing init()", t, func() { + So(VERSION, ShouldNotEqual, "") + // So(GITCOMMIT, ShouldNotEqual, "") + }) +} diff --git a/website/source/docs/providers/scaleway/index.html.markdown b/website/source/docs/providers/scaleway/index.html.markdown new file mode 100644 index 000000000..cd1d77ab3 --- /dev/null +++ b/website/source/docs/providers/scaleway/index.html.markdown @@ -0,0 +1,90 @@ +--- +layout: "scaleway" +page_title: "Provider: Scaleway" +sidebar_current: "docs-scaleway-index" +description: |- + The Scaleway provider is used to interact with Scaleway ARM cloud provider. +--- + +# Scaleway Provider + +The Scaleway provider is used to manage Scaleway resources. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +Here is an example that will setup the following: ++ An ARM Server. ++ An IP Address. ++ A security group. 
+
+(create this as sl.tf and run terraform commands from this directory):
+
+```hcl
+provider "scaleway" {
+    access_key = ""
+    organization = ""
+}
+
+resource "scaleway_ip" "ip" {
+    server = "${scaleway_server.test.id}"
+}
+
+resource "scaleway_server" "test" {
+    name = "test"
+    image = "aecaed73-51a5-4439-a127-6d8229847145"
+    type = "C2S"
+}
+
+resource "scaleway_volume" "test" {
+    name = "test"
+    size_in_gb = 20
+    type = "l_ssd"
+}
+
+resource "scaleway_volume_attachment" "test" {
+    server = "${scaleway_server.test.id}"
+    volume = "${scaleway_volume.test.id}"
+}
+
+resource "scaleway_security_group" "http" {
+    name = "http"
+    description = "allow HTTP and HTTPS traffic"
+}
+
+resource "scaleway_security_group_rule" "http_accept" {
+    security_group = "${scaleway_security_group.http.id}"
+
+    action = "accept"
+    direction = "inbound"
+    ip_range = "0.0.0.0/0"
+    protocol = "TCP"
+    dest_port_from = 80
+}
+
+resource "scaleway_security_group_rule" "https_accept" {
+    security_group = "${scaleway_security_group.http.id}"
+
+    action = "accept"
+    direction = "inbound"
+    ip_range = "0.0.0.0/0"
+    protocol = "TCP"
+    dest_port_from = 443
+}
+
+```
+
+You'll need to provide your Scaleway organization and access key,
+so that Terraform can connect. If you don't want to put
+credentials in your configuration file, you can leave them
+out:
+
+```
+provider "scaleway" {}
+```
+
+...and instead set these environment variables:
+
+- **SCALEWAY_ORGANIZATION**: Your Scaleway organization
+- **SCALEWAY_ACCESS_KEY**: Your API Access key
diff --git a/website/source/docs/providers/scaleway/r/ip.html.markdown b/website/source/docs/providers/scaleway/r/ip.html.markdown
new file mode 100644
index 000000000..cefb412b6
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/ip.html.markdown
@@ -0,0 +1,34 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: ip"
+sidebar_current: "docs-scaleway-resource-ip"
+description: |-
+  Manages Scaleway IPs.
+---
+
+# scaleway\_ip
+
+Provides IPs for ARM servers. This allows IPs to be created, updated and deleted.
+For additional details please refer to [API documentation](https://developer.scaleway.com/#ips).
+
+## Example Usage
+
+```
+resource "scaleway_ip" "test_ip" {
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `server` - (Optional) ID of ARM server to associate IP with
+
+Field `server` is editable.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - id of the new resource
+* `ip` - IP of the new resource
diff --git a/website/source/docs/providers/scaleway/r/security_group.html.markdown b/website/source/docs/providers/scaleway/r/security_group.html.markdown
new file mode 100644
index 000000000..f02567b28
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/security_group.html.markdown
@@ -0,0 +1,36 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: security_group"
+sidebar_current: "docs-scaleway-resource-security_group"
+description: |-
+  Manages Scaleway security groups.
+---
+
+# scaleway\_security_group
+
+Provides security groups. This allows security groups to be created, updated and deleted.
+For additional details please refer to [API documentation](https://developer.scaleway.com/#security-groups).
+
+## Example Usage
+
+```
+resource "scaleway_security_group" "test" {
+    name = "test"
+    description = "test"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) name of security group
+* `description` - (Required) description of security group
+
+Fields `name` and `description` are editable.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - id of the new resource
diff --git a/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown b/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown
new file mode 100644
index 000000000..de5f695fb
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown
@@ -0,0 +1,51 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: security_group_rule"
+sidebar_current: "docs-scaleway-resource-security_group_rule"
+description: |-
+  Manages Scaleway security group rules.
+---
+
+# scaleway\_security_group_rule
+
+Provides security group rules. This allows security group rules to be created, updated and deleted.
+For additional details please refer to [API documentation](https://developer.scaleway.com/#security-groups-manage-rules).
+
+## Example Usage
+
+```
+resource "scaleway_security_group" "test" {
+    name = "test"
+    description = "test"
+}
+
+resource "scaleway_security_group_rule" "smtp_drop_1" {
+    security_group = "${scaleway_security_group.test.id}"
+
+    action = "drop"
+    direction = "inbound"
+    ip_range = "0.0.0.0/0"
+    protocol = "TCP"
+    dest_port_from = 25
+}
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `action` - (Required) action of rule (`accept`, `drop`)
+* `direction` - (Required) direction of rule (`inbound`, `outbound`)
+* `ip_range` - (Required) ip_range of rule
+* `protocol` - (Required) protocol of rule (`ICMP`, `TCP`, `UDP`)
+* `dest_port_from` - (Optional) port range from
+* `dest_port_to` - (Optional) port range to
+
+Fields `action`, `direction`, `ip_range`, `protocol`, `dest_port_from` and `dest_port_to` are editable.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - id of the new resource
diff --git a/website/source/docs/providers/scaleway/r/server.html.markdown b/website/source/docs/providers/scaleway/r/server.html.markdown
new file mode 100644
index 000000000..0d6008bab
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/server.html.markdown
@@ -0,0 +1,38 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: server"
+sidebar_current: "docs-scaleway-resource-server"
+description: |-
+  Manages Scaleway servers.
+---
+
+# scaleway\_server
+
+Provides ARM servers. This allows servers to be created, updated and deleted.
+For additional details please refer to [API documentation](https://developer.scaleway.com/#servers).
+
+## Example Usage
+
+```
+resource "scaleway_server" "test" {
+    name = "test"
+    image = "5faef9cd-ea9b-4a63-9171-9e26bec03dbc"
+    type = "C1"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) name of ARM server
+* `image` - (Required) base image of ARM server
+* `type` - (Required) type of ARM server
+
+Fields `name` and `type` are editable.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - id of the new resource
diff --git a/website/source/docs/providers/scaleway/r/volume.html.markdown b/website/source/docs/providers/scaleway/r/volume.html.markdown
new file mode 100644
index 000000000..3041bbced
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/volume.html.markdown
@@ -0,0 +1,44 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: volume"
+sidebar_current: "docs-scaleway-resource-volume"
+description: |-
+  Manages Scaleway Volumes.
+---
+
+# scaleway\_volume
+
+Provides ARM volumes. This allows volumes to be created, updated and deleted.
+For additional details please refer to [API documentation](https://developer.scaleway.com/#volumes).
+
+## Example Usage
+
+```
+resource "scaleway_server" "test" {
+    name = "test"
+    image = "aecaed73-51a5-4439-a127-6d8229847145"
+    type = "C2S"
+    volumes = ["${scaleway_volume.test.id}"]
+}
+
+resource "scaleway_volume" "test" {
+    name = "test"
+    size_in_gb = 20
+    type = "l_ssd"
+}
+
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) name of volume
+* `size_in_gb` - (Required) size of the volume in GB
+* `type` - (Required) type of volume
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - id of the new resource
diff --git a/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown b/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown
new file mode 100644
index 000000000..271ac1d68
--- /dev/null
+++ b/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown
@@ -0,0 +1,48 @@
+---
+layout: "scaleway"
+page_title: "Scaleway: volume attachment"
+sidebar_current: "docs-scaleway-resource-volume-attachment"
+description: |-
+  Manages Scaleway Volume attachments for servers.
+---
+
+# scaleway\_volume\_attachment
+
+This allows volumes to be attached to servers.
+
+**Warning:** Attaching volumes requires the servers to be powered off. This will lead
+to downtime if the server is already in use.
+ +## Example Usage + +``` +resource "scaleway_server" "test" { + name = "test" + image = "aecaed73-51a5-4439-a127-6d8229847145" + type = "C2S" +} + +resource "scaleway_volume" "test" { + name = "test" + size_in_gb = 20 + type = "l_ssd" +} + +resource "scaleway_volume_attachment" "test" { + server = "${scaleway_server.test.id}" + volume = "${scaleway_volume.test.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `server` - (Required) id of the server +* `volume` - (Required) id of the volume to be attached + +## Attributes Reference + +The following attributes are exported: + +* `id` - id of the new resource diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index b589bdfb6..287ff0350 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -298,6 +298,10 @@ SoftLayer + > + Scaleway + + > Template diff --git a/website/source/layouts/scaleway.erb b/website/source/layouts/scaleway.erb new file mode 100644 index 000000000..f9ab577c2 --- /dev/null +++ b/website/source/layouts/scaleway.erb @@ -0,0 +1,41 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> From d1666ba76c80bcf0a08bda2ff312b0253bae334d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 13 Jul 2016 21:05:27 +0100 Subject: [PATCH 0268/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 440d2205a..b84939127 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,6 +42,7 @@ FEATURES: * **New Interpolation Function:** `distinct` [GH-7174] * **New Provider:** `grafana` [GH-6206] * **New Provider:** `logentries` [GH-7067] + * **New Provider:** `scaleway` [GH-7331] * **New Provider:** `random` - allows generation of random values without constantly generating diffs [GH-6672] * **New Remote State Provider:** - `gcs` - Google Cloud Storage [GH-6814] * **New Remote State Provider:** - `azure` - Microsoft Azure Storage [GH-7064] From 0cfb9b15f65d482f52ee4c671e7da32943afc7e4 Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 13 Jul 2016 21:44:28 +0100 Subject: [PATCH 0269/1238] provider/azurerm: Support Import `azurerm_network_security_rule` Soooo many missing fields not being set on the Read! ``` % make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMNetworkSecurityRule_' ==> Checking that code complies with gofmt requirements... /Users/stacko/Code/go/bin/stringer go generate $(go list ./... 
| grep -v /terraform/vendor/) 2016/07/13 21:34:24 Generated command/internal_plugin_list.go TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMNetworkSecurityRule_ -timeout 120m === RUN TestAccAzureRMNetworkSecurityRule_importBasic --- PASS: TestAccAzureRMNetworkSecurityRule_importBasic (208.10s) === RUN TestAccAzureRMNetworkSecurityRule_basic --- PASS: TestAccAzureRMNetworkSecurityRule_basic (190.66s) === RUN TestAccAzureRMNetworkSecurityRule_addingRules --- PASS: TestAccAzureRMNetworkSecurityRule_addingRules (256.73s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 655.514s ``` --- .../import_arm_network_security_rule_test.go | 29 +++++++++++++++++++ .../resource_arm_network_security_rule.go | 14 +++++++++ 2 files changed, 43 insertions(+) create mode 100644 builtin/providers/azurerm/import_arm_network_security_rule_test.go diff --git a/builtin/providers/azurerm/import_arm_network_security_rule_test.go b/builtin/providers/azurerm/import_arm_network_security_rule_test.go new file mode 100644 index 000000000..8e871767e --- /dev/null +++ b/builtin/providers/azurerm/import_arm_network_security_rule_test.go @@ -0,0 +1,29 @@ +package azurerm + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMNetworkSecurityRule_importBasic(t *testing.T) { + resourceName := "azurerm_network_security_rule.test" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMNetworkSecurityRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAzureRMNetworkSecurityRule_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_group_name", "network_security_group_name"}, + }, + }, + }) +} diff --git a/builtin/providers/azurerm/resource_arm_network_security_rule.go b/builtin/providers/azurerm/resource_arm_network_security_rule.go index 050781cd3..eb5b4f263 100644 --- a/builtin/providers/azurerm/resource_arm_network_security_rule.go +++ b/builtin/providers/azurerm/resource_arm_network_security_rule.go @@ -14,6 +14,9 @@ func resourceArmNetworkSecurityRule() *schema.Resource { Read: resourceArmNetworkSecurityRuleRead, Update: resourceArmNetworkSecurityRuleCreate, Delete: resourceArmNetworkSecurityRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": { @@ -180,6 +183,17 @@ func resourceArmNetworkSecurityRuleRead(d *schema.ResourceData, meta interface{} return fmt.Errorf("Error making Read request on Azure Network Security Rule %s: %s", sgRuleName, err) } + d.Set("access", resp.Properties.Access) + d.Set("destination_address_prefix", resp.Properties.DestinationAddressPrefix) + d.Set("destination_port_range", resp.Properties.DestinationPortRange) + d.Set("direction", resp.Properties.Direction) + d.Set("description", resp.Properties.Description) + d.Set("name", resp.Name) + d.Set("priority", resp.Properties.Priority) + d.Set("protocol", resp.Properties.Protocol) + d.Set("source_address_prefix", resp.Properties.SourceAddressPrefix) + d.Set("source_port_range", resp.Properties.SourcePortRange) + return nil } From 3d480ca7674c6d3ee95074a338a1a869da4dd339 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 7 Jul 2016 17:10:45 +0100 Subject: [PATCH 0270/1238] provider/aws: Support Import `aws_sqs_queue` Needed to change the test due to SQS having issues 
recreating the same queue multiple times. Now it uses a random name ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSSQSQueue_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSSQSQueue_ -timeout 120m === RUN TestAccAWSSQSQueue_importBasic --- PASS: TestAccAWSSQSQueue_importBasic (20.53s) === RUN TestAccAWSSQSQueue_basic --- PASS: TestAccAWSSQSQueue_basic (33.85s) === RUN TestAccAWSSQSQueue_redrivePolicy --- PASS: TestAccAWSSQSQueue_redrivePolicy (26.59s) === RUN TestAccAWSSQSQueue_Policybasic --- PASS: TestAccAWSSQSQueue_Policybasic (36.92s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 117.908s ``` --- .../aws/import_aws_sqs_queue_test.go | 35 +++++++++++++++++++ .../providers/aws/resource_aws_sqs_queue.go | 27 ++++++++++++++ .../aws/resource_aws_sqs_queue_test.go | 11 +++--- 3 files changed, 69 insertions(+), 4 deletions(-) create mode 100644 builtin/providers/aws/import_aws_sqs_queue_test.go diff --git a/builtin/providers/aws/import_aws_sqs_queue_test.go b/builtin/providers/aws/import_aws_sqs_queue_test.go new file mode 100644 index 000000000..45787eb6b --- /dev/null +++ b/builtin/providers/aws/import_aws_sqs_queue_test.go @@ -0,0 +1,35 @@ +package aws + +import ( + "testing" + + "fmt" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSSQSQueue_importBasic(t *testing.T) { + resourceName := "aws_sqs_queue.queue-with-defaults" + queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSQSQueueDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSQSConfigWithDefaults(queueName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + //The name is never returned after the initial create of the queue. 
+ //It is part of the URL and can be split down if needed + //ImportStateVerifyIgnore: []string{"name"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_sqs_queue.go b/builtin/providers/aws/resource_aws_sqs_queue.go index b253a833f..15c85ba01 100644 --- a/builtin/providers/aws/resource_aws_sqs_queue.go +++ b/builtin/providers/aws/resource_aws_sqs_queue.go @@ -5,10 +5,13 @@ import ( "encoding/json" "fmt" "log" + "net/url" "strconv" "github.com/hashicorp/terraform/helper/schema" + "strings" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/sqs" @@ -34,6 +37,9 @@ func resourceAwsSqsQueue() *schema.Resource { Read: resourceAwsSqsQueueRead, Update: resourceAwsSqsQueueUpdate, Delete: resourceAwsSqsQueueDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -189,6 +195,12 @@ func resourceAwsSqsQueueRead(d *schema.ResourceData, meta interface{}) error { return err } + name, err := extractNameFromSqsQueueUrl(d.Id()) + if err != nil { + return err + } + d.Set("name", name) + if attributeOutput.Attributes != nil && len(attributeOutput.Attributes) > 0 { attrmap := attributeOutput.Attributes resource := *resourceAwsSqsQueue() @@ -225,3 +237,18 @@ func resourceAwsSqsQueueDelete(d *schema.ResourceData, meta interface{}) error { } return nil } + +func extractNameFromSqsQueueUrl(queue string) (string, error) { + //http://sqs.us-west-2.amazonaws.com/123456789012/queueName + u, err := url.Parse(queue) + if err != nil { + return "", err + } + segments := strings.Split(u.Path, "/") + if len(segments) != 3 { + return "", fmt.Errorf("SQS Url not parsed correctly") + } + + return segments[2], nil + +} diff --git a/builtin/providers/aws/resource_aws_sqs_queue_test.go b/builtin/providers/aws/resource_aws_sqs_queue_test.go index a210fc5d2..5fa2b5b72 100644 --- a/builtin/providers/aws/resource_aws_sqs_queue_test.go +++ b/builtin/providers/aws/resource_aws_sqs_queue_test.go @@ -13,13 +13,14 @@ import ( ) func TestAccAWSSQSQueue_basic(t *testing.T) { + queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSSQSQueueDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSSQSConfigWithDefaults, + Config: testAccAWSSQSConfigWithDefaults(queueName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue-with-defaults"), ), @@ -194,11 +195,13 @@ func testAccCheckAWSSQSExistsWithOverrides(n string) resource.TestCheckFunc { } } -const testAccAWSSQSConfigWithDefaults = ` +func testAccAWSSQSConfigWithDefaults(r string) string { + return fmt.Sprintf(` resource "aws_sqs_queue" "queue-with-defaults" { - name = "test-sqs-queue-with-defaults" + name = "%s" +} +`, r) } -` const testAccAWSSQSConfigWithOverrides = ` resource "aws_sqs_queue" "queue-with-overrides" { From de60481428ec56df0a94ace5ba84a6c5948745c6 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 13 Jul 2016 15:38:23 -0600 Subject: [PATCH 0271/1238] provider/aws: Rework Beanstalk optional polling expose a poll_interval for users to configure polling for updates --- ...ource_aws_elastic_beanstalk_environment.go | 105 ++++++++++-------- helper/resource/state.go | 19 +++- 2 files changed, 71 insertions(+), 53 deletions(-) diff --git 
a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index a1a3166c2..c181af3fa 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -137,7 +137,6 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { "poll_interval": &schema.Schema{ Type: schema.TypeString, Optional: true, - Default: "10s", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) duration, err := time.ParseDuration(value) @@ -261,18 +260,20 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i if err != nil { return err } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { - return err + log.Printf("[WARN] Error parsing poll_interval, using default backoff") } stateConf := &resource.StateChangeConf{ - Pending: []string{"Launching", "Updating"}, - Target: []string{"Ready"}, - Refresh: environmentStateRefreshFunc(conn, d.Id()), - Timeout: waitForReadyTimeOut, - Delay: 10 * time.Second, - MinTimeout: pollInterval, + Pending: []string{"Launching", "Updating"}, + Target: []string{"Ready"}, + Refresh: environmentStateRefreshFunc(conn, d.Id()), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, } _, err = stateConf.WaitForState() @@ -295,19 +296,24 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i envId := d.Id() + var hasChange bool + updateOpts := elasticbeanstalk.UpdateEnvironmentInput{ EnvironmentId: aws.String(envId), } if d.HasChange("description") { + hasChange = true updateOpts.Description = aws.String(d.Get("description").(string)) } if d.HasChange("solution_stack_name") { + hasChange = true updateOpts.SolutionStackName = aws.String(d.Get("solution_stack_name").(string)) } if d.HasChange("setting") { + hasChange = true o, n := d.GetChange("setting") if o == nil { o = &schema.Set{F: optionSettingValueHash} @@ -323,45 +329,49 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i } if d.HasChange("template_name") { + hasChange = true updateOpts.TemplateName = aws.String(d.Get("template_name").(string)) } - // Get the current time to filter describeBeanstalkEvents messages - t := time.Now() - log.Printf("[DEBUG] Elastic Beanstalk Environment update opts: %s", updateOpts) - _, err := conn.UpdateEnvironment(&updateOpts) - if err != nil { - return err - } + if hasChange { + // Get the current time to filter describeBeanstalkEvents messages + t := time.Now() + log.Printf("[DEBUG] Elastic Beanstalk Environment update opts: %s", updateOpts) + _, err := conn.UpdateEnvironment(&updateOpts) + if err != nil { + return err + } - waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) - if err != nil { - return err - } - pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) - if err != nil { - return err - } + waitForReadyTimeOut, err := time.ParseDuration(d.Get("wait_for_ready_timeout").(string)) + if err != nil { + return err + } + pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) + if err != nil { + log.Printf("[WARN] Error parsing poll_interval, using default backoff") + } - stateConf := &resource.StateChangeConf{ - Pending: []string{"Launching", "Updating"}, - Target: []string{"Ready"}, - Refresh: 
environmentStateRefreshFunc(conn, d.Id()), - Timeout: waitForReadyTimeOut, - Delay: 10 * time.Second, - MinTimeout: pollInterval, - } + stateConf := &resource.StateChangeConf{ + Pending: []string{"Launching", "Updating"}, + Target: []string{"Ready"}, + Refresh: environmentStateRefreshFunc(conn, d.Id()), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, + } - _, err = stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", - d.Id(), err) - } + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Elastic Beanstalk Environment (%s) to become ready: %s", + d.Id(), err) + } - err = describeBeanstalkEvents(conn, d.Id(), t) - if err != nil { - return err + err = describeBeanstalkEvents(conn, d.Id(), t) + if err != nil { + return err + } } return resourceAwsElasticBeanstalkEnvironmentRead(d, meta) @@ -590,16 +600,17 @@ func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta i } pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { - return err + log.Printf("[WARN] Error parsing poll_interval, using default backoff") } stateConf := &resource.StateChangeConf{ - Pending: []string{"Terminating"}, - Target: []string{"Terminated"}, - Refresh: environmentStateRefreshFunc(conn, d.Id()), - Timeout: waitForReadyTimeOut, - Delay: 10 * time.Second, - MinTimeout: pollInterval, + Pending: []string{"Terminating"}, + Target: []string{"Terminated"}, + Refresh: environmentStateRefreshFunc(conn, d.Id()), + Timeout: waitForReadyTimeOut, + Delay: 10 * time.Second, + PollInterval: pollInterval, + MinTimeout: 3 * time.Second, } _, err = stateConf.WaitForState() diff --git a/helper/resource/state.go b/helper/resource/state.go index 7f8680fed..afa758699 100644 --- a/helper/resource/state.go +++ b/helper/resource/state.go @@ -26,6 +26,7 @@ type StateChangeConf struct { Target []string // Target state Timeout time.Duration // The amount of time to wait before timeout MinTimeout time.Duration // Smallest time to wait before refreshes + PollInterval time.Duration // Override MinTimeout/backoff and only poll this often NotFoundChecks int // Number of times to allow not found // This is to work around inconsistent APIs @@ -72,14 +73,20 @@ func (conf *StateChangeConf) WaitForState() (interface{}, error) { time.Sleep(conf.Delay) var err error + var wait time.Duration for tries := 0; ; tries++ { // Wait between refreshes using an exponential backoff - wait := time.Duration(math.Pow(2, float64(tries))) * - 100 * time.Millisecond - if wait < conf.MinTimeout { - wait = conf.MinTimeout - } else if wait > 10*time.Second { - wait = 10 * time.Second + // If a poll interval has been specified, choose that interval + if conf.PollInterval > 0 && conf.PollInterval < 180*time.Second { + wait = conf.PollInterval + } else { + wait = time.Duration(math.Pow(2, float64(tries))) * + 100 * time.Millisecond + if wait < conf.MinTimeout { + wait = conf.MinTimeout + } else if wait > 10*time.Second { + wait = 10 * time.Second + } } log.Printf("[TRACE] Waiting %s before next try", wait) From a941963ca24a673162090acf312a64635ffa8064 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 13 Jul 2016 15:39:53 -0600 Subject: [PATCH 0272/1238] match the system limit --- .../providers/aws/resource_aws_elastic_beanstalk_environment.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index c181af3fa..35815f41f 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -146,7 +146,7 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { } if duration < 10*time.Second || duration > 60*time.Second { errors = append(errors, fmt.Errorf( - "%q must be between 10s and 60s", k)) + "%q must be between 10s and 180s", k)) } return }, From bb10d75d7d4682e77b3d8911d233897c365750b2 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 13 Jul 2016 15:46:12 -0600 Subject: [PATCH 0273/1238] ensure pollInterval is 0 --- .../aws/resource_aws_elastic_beanstalk_environment.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 35815f41f..4072d7850 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -263,6 +263,7 @@ func resourceAwsElasticBeanstalkEnvironmentCreate(d *schema.ResourceData, meta i pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { + pollInterval = 0 log.Printf("[WARN] Error parsing poll_interval, using default backoff") } @@ -348,6 +349,7 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i } pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { + pollInterval = 0 log.Printf("[WARN] Error parsing poll_interval, using default backoff") } @@ -600,6 +602,7 @@ func resourceAwsElasticBeanstalkEnvironmentDelete(d *schema.ResourceData, meta i } pollInterval, err := time.ParseDuration(d.Get("poll_interval").(string)) if err != nil { + pollInterval = 0 log.Printf("[WARN] Error parsing poll_interval, using default backoff") } From d74c2d54bfc2be8fcc123dc58c8fa1c4c6abb2f6 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 13 Jul 2016 15:57:20 -0600 Subject: [PATCH 0274/1238] update docs --- .../aws/r/elastic_beanstalk_environment.html.markdown | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown index 6f6674ddd..97181c0a5 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown @@ -55,9 +55,10 @@ off of. Example stacks can be found in the [Amazon API documentation][1] [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should wait for an Elastic Beanstalk Environment to be in a ready state before timing out. -* `poll_interval` – (Default: `10s`) The time between polling the AWS API to +* `poll_interval` – The time between polling the AWS API to check if changes have been applied. Use this to adjust the rate of API calls -for any `create` or `update` action. Minimum `10s`, maximum `60s` +for any `create` or `update` action. Minimum `10s`, maximum `180s`. Omit this to +use the default behavior, which is an exponential backoff * `tags` – (Optional) A set of tags to apply to the Environment. 
**Note:** at this time the Elastic Beanstalk API does not provide a programatic way of changing these tags after initial application From 37e3aa9d8c92a754ae880096d553959b2dc66005 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Wed, 13 Jul 2016 23:52:20 +0100 Subject: [PATCH 0275/1238] provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion (#7584) * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion The OS Disk previously wasn't deleted with the VM, this causes subsequent apply operations which recreate the VM to fail as the VHD blob already exists. Fixes #6610 * provider/azurerm: add delete_os_disk_on_termination to azurerm_virtual_machine delete_os_disk_on_termination is a bool which defaults to false to avoid making a breaking change, and to follow the same flow as the Azure API --- .../azurerm/resource_arm_virtual_machine.go | 50 ++++- .../resource_arm_virtual_machine_test.go | 204 +++++++++++++++++- 2 files changed, 249 insertions(+), 5 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index 95698b009..6c390c74f 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "net/http" + "net/url" "strings" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -158,6 +159,12 @@ func resourceArmVirtualMachine() *schema.Resource { Set: resourceArmVirtualMachineStorageOsDiskHash, }, + "delete_os_disk_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "storage_data_disk": { Type: schema.TypeList, Optional: true, @@ -550,9 +557,48 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e resGroup := id.ResourceGroup name := id.Path["virtualMachines"] - _, err = vmClient.Delete(resGroup, name, make(chan struct{})) + if _, err = vmClient.Delete(resGroup, name, make(chan struct{})); err != nil { + return err + } - return err + if deleteOsDisk := d.Get("delete_os_disk_on_termination").(bool); !deleteOsDisk { + log.Printf("[INFO] delete_os_disk_on_termination is false, skipping delete") + return nil + } + + osDisk, err := expandAzureRmVirtualMachineOsDisk(d) + if err != nil { + return fmt.Errorf("Error expanding OS Disk") + } + + vhdURL, err := url.Parse(*osDisk.Vhd.URI) + if err != nil { + return fmt.Errorf("Cannot parse OS Disk VHD URI: %s", err) + } + + // VHD URI is in the form: https://storageAccountName.blob.core.windows.net/containerName/blobName + storageAccountName := strings.Split(vhdURL.Host, ".")[0] + path := strings.Split(strings.TrimPrefix(vhdURL.Path, "/"), "/") + containerName := path[0] + blobName := path[1] + + blobClient, storageAccountExists, err := meta.(*ArmClient).getBlobStorageClientForStorageAccount(id.ResourceGroup, storageAccountName) + if err != nil { + return fmt.Errorf("Error creating blob store account for VHD deletion: %s", err) + } + + if !storageAccountExists { + log.Printf("[INFO] Storage Account %q doesn't exist so the VHD blob won't exist", storageAccountName) + return nil + } + + log.Printf("[INFO] Deleting VHD blob %s", blobName) + _, err = blobClient.DeleteBlobIfExists(containerName, blobName, nil) + if err != nil { + return fmt.Errorf("Error deleting VHD blob: %s", err) + } + + return nil } func resourceArmVirtualMachinePlanHash(v interface{}) int { diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go 
b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index eba46cd61..ef098a5a6 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -14,9 +14,13 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineDestroy, + // deletion of OS Disk VHD is opt in + testCheckAzureRMVirtualMachineOSDiskVHDExistance(true), + ), Steps: []resource.TestStep{ { Config: config, @@ -167,6 +171,29 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { }) } +func TestAccAzureRMVirtualMachine_deleteVHD(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + ), + }, + { + Config: postConfig, + Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(false), + }, + }, + }) +} + func testCheckAzureRMVirtualMachineExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -221,6 +248,36 @@ func testCheckAzureRMVirtualMachineDestroy(s *terraform.State) error { return nil } +func testCheckAzureRMVirtualMachineOSDiskVHDExistance(shouldExist bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_storage_container" { + continue + } + + // fetch storage account and container name + resourceGroup := rs.Primary.Attributes["resource_group_name"] + storageAccountName := rs.Primary.Attributes["storage_account_name"] + containerName := rs.Primary.Attributes["name"] + storageClient, _, err := testAccProvider.Meta().(*ArmClient).getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + return fmt.Errorf("Error creating Blob storage client: %s", err) + } + + exists, err := storageClient.BlobExists(containerName, "myosdisk1.vhd") + if err != nil { + return fmt.Errorf("Error checking if OS Disk VHD Blob exists: %s", err) + } + + if exists && !shouldExist { + return fmt.Errorf("OS Disk VHD Blob still exists") + } + } + + return nil + } +} + var testAccAzureRMVirtualMachine_basicLinuxMachine = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" @@ -309,6 +366,147 @@ resource "azurerm_virtual_machine" "test" { } ` +var testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name 
= "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } +} + +resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} + +resource "azurerm_virtual_machine" "test" { + name = "acctvm-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_A0" + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + delete_os_disk_on_termination = true + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags { + environment = "Production" + cost-center = "Ops" + } +} +` + +var testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } +} + +resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} +` + var testAccAzureRMVirtualMachine_withDataDisk = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" From e28caa0496cac5a78e9bcc7421a4b09d185c5aac Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 13 Jul 2016 23:54:24 +0100 Subject: [PATCH 0276/1238] Update CHANGELOG.md --- 
CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b84939127..3caa03094 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -203,6 +203,7 @@ BUG FIXES: * provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again [GH-7113] * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding [GH-7307] * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources [GH-7308] + * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion [GH-7584] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] From 12dc87411ef39b3eb45549647561bcf85fe920a8 Mon Sep 17 00:00:00 2001 From: Joonas Bergius Date: Wed, 13 Jul 2016 16:59:50 -0600 Subject: [PATCH 0277/1238] docs: Improve the digitalocean_tag usage example (#7634) --- .../source/docs/providers/do/r/tag.html.markdown | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/do/r/tag.html.markdown b/website/source/docs/providers/do/r/tag.html.markdown index 562e400c6..02e0a9694 100644 --- a/website/source/docs/providers/do/r/tag.html.markdown +++ b/website/source/docs/providers/do/r/tag.html.markdown @@ -16,10 +16,19 @@ configuration via their ID or name. ## Example Usage ``` -# Create a new SSH key -resource "digitalocean_tag" "default" { +# Create a new tag +resource "digitalocean_tag" "foobar" { name = "foobar" } + +# Create a new droplet in nyc3 with the foobar tag +resource "digitalocean_droplet" "web" { + image = "ubuntu-16-04-x64" + name = "web-1" + region = "nyc3" + size = "512mb" + tags = ["${digitalocean_tag.foobar.id}"] +} ``` ## Argument Reference From c1410509290507d77dab7b07932062e6f995802e Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 14 Jul 2016 08:33:30 +0100 Subject: [PATCH 0278/1238] provider/azurerm: Remove tests for delete_on_termination --- .../resource_arm_virtual_machine_test.go | 55 ------------------- 1 file changed, 55 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index ef098a5a6..a37fba2d4 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -18,8 +18,6 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { Providers: testAccProviders, CheckDestroy: resource.ComposeTestCheckFunc( testCheckAzureRMVirtualMachineDestroy, - // deletion of OS Disk VHD is opt in - testCheckAzureRMVirtualMachineOSDiskVHDExistance(true), ), Steps: []resource.TestStep{ { @@ -171,29 +169,6 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { }) } -func TestAccAzureRMVirtualMachine_deleteVHD(t *testing.T) { - ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk, ri, ri, ri, ri, ri, ri, ri) - postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testCheckAzureRMVirtualMachineDestroy, - Steps: []resource.TestStep{ - { - Config: preConfig, - Check: 
resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), - ), - }, - { - Config: postConfig, - Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(false), - }, - }, - }) -} - func testCheckAzureRMVirtualMachineExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -248,36 +223,6 @@ func testCheckAzureRMVirtualMachineDestroy(s *terraform.State) error { return nil } -func testCheckAzureRMVirtualMachineOSDiskVHDExistance(shouldExist bool) resource.TestCheckFunc { - return func(s *terraform.State) error { - for _, rs := range s.RootModule().Resources { - if rs.Type != "azurerm_storage_container" { - continue - } - - // fetch storage account and container name - resourceGroup := rs.Primary.Attributes["resource_group_name"] - storageAccountName := rs.Primary.Attributes["storage_account_name"] - containerName := rs.Primary.Attributes["name"] - storageClient, _, err := testAccProvider.Meta().(*ArmClient).getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) - if err != nil { - return fmt.Errorf("Error creating Blob storage client: %s", err) - } - - exists, err := storageClient.BlobExists(containerName, "myosdisk1.vhd") - if err != nil { - return fmt.Errorf("Error checking if OS Disk VHD Blob exists: %s", err) - } - - if exists && !shouldExist { - return fmt.Errorf("OS Disk VHD Blob still exists") - } - } - - return nil - } -} - var testAccAzureRMVirtualMachine_basicLinuxMachine = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" From 854c9bd488dfdb12303ec47e42d6de8fc17e40a5 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Thu, 14 Jul 2016 10:51:55 +0100 Subject: [PATCH 0279/1238] providers/azurerm: document delete_os_disk_on_termination flag (#7639) --- .../docs/providers/azurerm/r/virtual_machine.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown index aab475a5b..2938c0a13 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown @@ -210,6 +210,7 @@ The following arguments are supported: * `vm_size` - (Required) Specifies the [size of the virtual machine](https://azure.microsoft.com/en-us/documentation/articles/virtual-machines-size-specs/). * `storage_image_reference` - (Optional) A Storage Image Reference block as documented below. * `storage_os_disk` - (Required) A Storage OS Disk block as referenced below. +* `delete_os_disk_on_termination` - (Optional) Flag to enable deletion of the OS Disk VHD blob when the VM is deleted, defaults to `false` * `storage_data_disk` - (Optional) A list of Storage Data disk blocks as referenced below. * `os_profile` - (Required) An OS Profile block as documented below. * `os_profile_windows_config` - (Required, when a windows machine) A Windows config block as documented below. 
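
For readers following the `delete_os_disk_on_termination` change documented in the patch above, here is a minimal sketch of how the flag would appear in a configuration. The resource names, image, and VHD URI are illustrative placeholders rather than values taken from these patches, and the `os_profile` and `storage_image_reference` blocks a real virtual machine also needs are omitted for brevity:

```
resource "azurerm_virtual_machine" "example" {
  name                  = "example-vm"
  location              = "West US"
  resource_group_name   = "${azurerm_resource_group.example.name}"
  network_interface_ids = ["${azurerm_network_interface.example.id}"]
  vm_size               = "Standard_A0"

  # Opt in to deleting the OS disk VHD blob together with the VM; the
  # default of false keeps the old behaviour and leaves the blob behind,
  # which makes recreating a VM with the same VHD URI fail.
  delete_os_disk_on_termination = true

  storage_os_disk {
    name          = "example-osdisk"
    vhd_uri       = "${azurerm_storage_account.example.primary_blob_endpoint}vhds/example-osdisk.vhd"
    caching       = "ReadWrite"
    create_option = "FromImage"
  }
}
```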
From 324e78020d9ea9beb54a97b5ad1e2ce519bc4148 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Thu, 14 Jul 2016 14:20:42 +0100 Subject: [PATCH 0280/1238] provider/azurerm: fix tests removed in c1410509290507d77dab7b07932062e6f995802e (#7640) The tests were removed due to a nil pointer panic in testCheckAzureRMVirtualMachineOSDiskVHDExistance when the storage account itself had been deleted in the destroy stage Added a test to cover opting out of VHD delete rather than polluting the basic linux VM test. ``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMVirtualMachine_deleteVHDOpt -timeout 120m === RUN TestAccAzureRMVirtualMachine_deleteVHDOptOut --- PASS: TestAccAzureRMVirtualMachine_deleteVHDOptOut (731.54s) === RUN TestAccAzureRMVirtualMachine_deleteVHDOptIn --- PASS: TestAccAzureRMVirtualMachine_deleteVHDOptIn (590.87s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 1322.529s ``` ``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMVirtualMachine_basicLinuxMachine -timeout 120m === RUN TestAccAzureRMVirtualMachine_basicLinuxMachine ^[--- PASS: TestAccAzureRMVirtualMachine_basicLinuxMachine (587.63s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 587.738s ``` --- .../resource_arm_virtual_machine_test.go | 84 +++++++++++++++++-- 1 file changed, 79 insertions(+), 5 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index a37fba2d4..789e6b059 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -14,11 +14,9 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineDestroy, - ), + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, Steps: []resource.TestStep{ { Config: config, @@ -169,6 +167,52 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { }) } +func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + ), + }, + { + Config: postConfig, + Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(true), + }, + }, + }) +} + +func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + 
CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + ), + }, + { + Config: postConfig, + Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(false), + }, + }, + }) +} + func testCheckAzureRMVirtualMachineExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -223,6 +267,36 @@ func testCheckAzureRMVirtualMachineDestroy(s *terraform.State) error { return nil } +func testCheckAzureRMVirtualMachineOSDiskVHDExistance(shouldExist bool) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_storage_container" { + continue + } + + // fetch storage account and container name + resourceGroup := rs.Primary.Attributes["resource_group_name"] + storageAccountName := rs.Primary.Attributes["storage_account_name"] + containerName := rs.Primary.Attributes["name"] + storageClient, _, err := testAccProvider.Meta().(*ArmClient).getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + return fmt.Errorf("Error creating Blob storage client: %s", err) + } + + exists, err := storageClient.BlobExists(containerName, "myosdisk1.vhd") + if err != nil { + return fmt.Errorf("Error checking if OS Disk VHD Blob exists: %s", err) + } + + if exists && !shouldExist { + return fmt.Errorf("OS Disk VHD Blob still exists") + } + } + + return nil + } +} + var testAccAzureRMVirtualMachine_basicLinuxMachine = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" From 33233be2478575efa04b1a7fc52f9805b6528a8c Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 14 Jul 2016 15:48:41 +0100 Subject: [PATCH 0281/1238] provider/azurerm: Change of `azurerm_virtual_machine` computer_name now ForceNew ``` % make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMVirtualMachine_ChangeComputerName' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMVirtualMachine_ChangeComputerName -timeout 120m === RUN TestAccAzureRMVirtualMachine_ChangeComputerName --- PASS: TestAccAzureRMVirtualMachine_ChangeComputerName (965.04s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 965.051s ``` --- .../azurerm/resource_arm_virtual_machine.go | 2 +- .../resource_arm_virtual_machine_test.go | 252 +++++++++++++++++- 2 files changed, 241 insertions(+), 13 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index 6c390c74f..64d9ac83a 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -215,7 +215,7 @@ func resourceArmVirtualMachine() *schema.Resource { "computer_name": { Type: schema.TypeString, Optional: true, - Computed: true, + ForceNew: true, }, "admin_username": { diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index 789e6b059..39a138577 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -5,12 +5,14 @@ import ( "net/http" "testing" + "github.com/Azure/azure-sdk-for-go/arm/compute" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ @@ -21,7 +23,7 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, }, @@ -29,6 +31,8 @@ func TestAccAzureRMVirtualMachine_basicLinuxMachine(t *testing.T) { } func TestAccAzureRMVirtualMachine_withDataDisk(t *testing.T) { + var vm compute.VirtualMachine + ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_withDataDisk, ri, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ @@ -39,7 +43,7 @@ func TestAccAzureRMVirtualMachine_withDataDisk(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, }, @@ -47,6 +51,8 @@ func TestAccAzureRMVirtualMachine_withDataDisk(t *testing.T) { } func TestAccAzureRMVirtualMachine_tags(t *testing.T) { + var vm compute.VirtualMachine + ri := acctest.RandInt() preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineUpdated, ri, ri, ri, ri, ri, ri, ri) @@ -58,7 +64,7 @@ func TestAccAzureRMVirtualMachine_tags(t *testing.T) { { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), resource.TestCheckResourceAttr( "azurerm_virtual_machine.test", "tags.%", "2"), resource.TestCheckResourceAttr( @@ 
-71,7 +77,7 @@ func TestAccAzureRMVirtualMachine_tags(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), resource.TestCheckResourceAttr( "azurerm_virtual_machine.test", "tags.%", "1"), resource.TestCheckResourceAttr( @@ -85,6 +91,8 @@ func TestAccAzureRMVirtualMachine_tags(t *testing.T) { //This is a regression test around https://github.com/hashicorp/terraform/issues/6517 //Because we use CreateOrUpdate, we were sending an empty password on update requests func TestAccAzureRMVirtualMachine_updateMachineSize(t *testing.T) { + var vm compute.VirtualMachine + ri := acctest.RandInt() preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_updatedLinuxMachine, ri, ri, ri, ri, ri, ri, ri) @@ -96,7 +104,7 @@ func TestAccAzureRMVirtualMachine_updateMachineSize(t *testing.T) { { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), resource.TestCheckResourceAttr( "azurerm_virtual_machine.test", "vm_size", "Standard_A0"), ), @@ -104,7 +112,7 @@ func TestAccAzureRMVirtualMachine_updateMachineSize(t *testing.T) { { Config: postConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), resource.TestCheckResourceAttr( "azurerm_virtual_machine.test", "vm_size", "Standard_A1"), ), @@ -114,6 +122,7 @@ func TestAccAzureRMVirtualMachine_updateMachineSize(t *testing.T) { } func TestAccAzureRMVirtualMachine_basicWindowsMachine(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_basicWindowsMachine, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ @@ -124,7 +133,7 @@ func TestAccAzureRMVirtualMachine_basicWindowsMachine(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, }, @@ -132,6 +141,7 @@ func TestAccAzureRMVirtualMachine_basicWindowsMachine(t *testing.T) { } func TestAccAzureRMVirtualMachine_windowsUnattendedConfig(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_windowsUnattendedConfig, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ @@ -142,7 +152,7 @@ func TestAccAzureRMVirtualMachine_windowsUnattendedConfig(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, }, @@ -150,6 +160,7 @@ func TestAccAzureRMVirtualMachine_windowsUnattendedConfig(t *testing.T) { } func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() config := fmt.Sprintf(testAccAzureRMVirtualMachine_winRMConfig, ri, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ @@ -160,7 +171,7 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { { Config: config, Check: resource.ComposeTestCheckFunc( - 
testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, }, @@ -168,6 +179,7 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { } func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) @@ -179,7 +191,7 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, { @@ -191,6 +203,7 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { } func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { + var vm compute.VirtualMachine ri := acctest.RandInt() preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) @@ -202,7 +215,7 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { { Config: preConfig, Check: resource.ComposeTestCheckFunc( - testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test"), + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &vm), ), }, { @@ -213,7 +226,37 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { }) } -func testCheckAzureRMVirtualMachineExists(name string) resource.TestCheckFunc { +func TestAccAzureRMVirtualMachine_ChangeComputerName(t *testing.T) { + var afterCreate, afterUpdate compute.VirtualMachine + + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_machineNameBeforeUpdate, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_updateMachineName, ri, ri, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &afterCreate), + ), + }, + + resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &afterUpdate), + testAccCheckVirtualMachineRecreated( + t, &afterCreate, &afterUpdate), + ), + }, + }, + }) +} + +func testCheckAzureRMVirtualMachineExists(name string, vm *compute.VirtualMachine) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API rs, ok := s.RootModule().Resources[name] @@ -238,6 +281,18 @@ func testCheckAzureRMVirtualMachineExists(name string) resource.TestCheckFunc { return fmt.Errorf("Bad: VirtualMachine %q (resource group: %q) does not exist", vmName, resourceGroup) } + *vm = resp + + return nil + } +} + +func testAccCheckVirtualMachineRecreated(t *testing.T, + before, after *compute.VirtualMachine) resource.TestCheckFunc { + return func(s *terraform.State) error { + if before.ID == after.ID { + t.Fatalf("Expected change of Virtual Machine IDs, but both were %v", before.ID) + } 
return nil } } @@ -385,6 +440,95 @@ resource "azurerm_virtual_machine" "test" { } ` +var testAccAzureRMVirtualMachine_machineNameBeforeUpdate = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" +} + +resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } +} + +resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" +} + +resource "azurerm_virtual_machine" "test" { + name = "acctvm-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_A0" + delete_os_disk_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" 
+ } + + os_profile_linux_config { + disable_password_authentication = false + } + + tags { + environment = "Production" + cost-center = "Ops" + } +} +` + var testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" @@ -1049,3 +1193,87 @@ resource "azurerm_virtual_machine" "test" { } } ` + +var testAccAzureRMVirtualMachine_updateMachineName = ` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" + } + + resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + } + + resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" + } + + resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } + } + + resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } + } + + resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" + } + + resource "azurerm_virtual_machine" "test" { + name = "acctvm-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_A0" + delete_os_disk_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "newhostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" + } + + os_profile_linux_config { + disable_password_authentication = false + } + } + ` From 0fde61b9ab4574f52975196c65f9584f22b63126 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Thu, 14 Jul 2016 16:06:58 +0100 Subject: [PATCH 0282/1238] provider/azurerm: catch azurerm_template_deployment errors (#7644) The error was ignored causing Terraform to report that the deployments was successful rather than in a bad state. This commit cause the apply operation to report the error. Added a test which attempts to create a storage account with a name longer than the maximum permitted length to force a failure. 
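The essence of the fix, for readers skimming the full diff below, is a single change in the create function: the error returned by the long-running `CreateOrUpdate` call is now wrapped and returned instead of being discarded. A sketch of the corrected pattern in isolation (not the full resource function):

```
// deployClient, resGroup, name and deployment come from the surrounding
// create function shown in the diff below.
_, err := deployClient.CreateOrUpdate(resGroup, name, deployment, make(chan struct{}))
if err != nil {
	// Previously this branch returned nil, silently swallowing the failure.
	return fmt.Errorf("Error creating deployment: %s", err)
}
```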
``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMTemplateDeployment_ -timeout 120m === RUN TestAccAzureRMTemplateDeployment_basic --- PASS: TestAccAzureRMTemplateDeployment_basic (377.78s) === RUN TestAccAzureRMTemplateDeployment_withParams --- PASS: TestAccAzureRMTemplateDeployment_withParams (327.89s) === RUN TestAccAzureRMTemplateDeployment_withError --- PASS: TestAccAzureRMTemplateDeployment_withError (226.64s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 932.440s ``` --- .../resource_arm_template_deployment.go | 2 +- .../resource_arm_template_deployment_test.go | 80 +++++++++++++++++++ 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/builtin/providers/azurerm/resource_arm_template_deployment.go b/builtin/providers/azurerm/resource_arm_template_deployment.go index 79f400715..5eb37cd70 100644 --- a/builtin/providers/azurerm/resource_arm_template_deployment.go +++ b/builtin/providers/azurerm/resource_arm_template_deployment.go @@ -101,7 +101,7 @@ func resourceArmTemplateDeploymentCreate(d *schema.ResourceData, meta interface{ _, err := deployClient.CreateOrUpdate(resGroup, name, deployment, make(chan struct{})) if err != nil { - return nil + return fmt.Errorf("Error creating deployment: %s", err) } read, err := deployClient.Get(resGroup, name) diff --git a/builtin/providers/azurerm/resource_arm_template_deployment_test.go b/builtin/providers/azurerm/resource_arm_template_deployment_test.go index 99314628b..efb64ce0c 100644 --- a/builtin/providers/azurerm/resource_arm_template_deployment_test.go +++ b/builtin/providers/azurerm/resource_arm_template_deployment_test.go @@ -3,6 +3,7 @@ package azurerm import ( "fmt" "net/http" + "regexp" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -47,6 +48,22 @@ func TestAccAzureRMTemplateDeployment_withParams(t *testing.T) { }) } +func TestAccAzureRMTemplateDeployment_withError(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTemplateDeployment_withError, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTemplateDeploymentDestroy, + Steps: []resource.TestStep{ + { + Config: config, + ExpectError: regexp.MustCompile("The deployment operation failed"), + }, + }, + }) +} + func testCheckAzureRMTemplateDeploymentExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -249,3 +266,66 @@ DEPLOY } ` + +// StorageAccount name is too long, forces error +var testAccAzureRMTemplateDeployment_withError = ` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" + } + + output "test" { + value = "${azurerm_template_deployment.test.outputs.testOutput}" + } + + resource "azurerm_template_deployment" "test" { + name = "acctesttemplate-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + template_body = < Date: Thu, 14 Jul 2016 16:07:38 +0100 Subject: [PATCH 0283/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3caa03094..159b9c572 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -204,6 +204,7 @@ BUG FIXES: * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding [GH-7307] * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources [GH-7308] * provider/azurerm: 
destroy azurerm_virtual_machine OS Disk VHD on deletion [GH-7584] + * provider/azurerm: catch `azurerm_template_deployment` erroring silently [GH-7644] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] From 340655d56c9db942ae05d0b0d562a58604b36fa5 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 14 Jul 2016 12:01:45 -0600 Subject: [PATCH 0284/1238] core: Allow "." character in map keys Fixes #2143 and fixes #7130. --- terraform/resource.go | 30 +++++++++++++++++++++--------- 1 file changed, 21 insertions(+), 9 deletions(-) diff --git a/terraform/resource.go b/terraform/resource.go index 7d22a6e9b..1d146799e 100644 --- a/terraform/resource.go +++ b/terraform/resource.go @@ -48,14 +48,6 @@ type Resource struct { // its a primary instance, a tainted instance, or an orphan. type ResourceFlag byte -const ( - FlagPrimary ResourceFlag = 1 << iota - FlagTainted - FlagOrphan - FlagReplacePrimary - FlagDeposed -) - // InstanceInfo is used to hold information about the instance and/or // resource being modified. type InstanceInfo struct { @@ -180,7 +172,8 @@ func (c *ResourceConfig) get( } var current interface{} = raw - for _, part := range parts { + var previous interface{} = nil + for i, part := range parts { if current == nil { return nil, false } @@ -188,12 +181,23 @@ func (c *ResourceConfig) get( cv := reflect.ValueOf(current) switch cv.Kind() { case reflect.Map: + previous = current v := cv.MapIndex(reflect.ValueOf(part)) if !v.IsValid() { + if i > 0 && i != (len(parts)-1) { + tryKey := strings.Join(parts[i:], ".") + v := cv.MapIndex(reflect.ValueOf(tryKey)) + if !v.IsValid() { + return nil, false + } + return v.Interface(), true + } + return nil, false } current = v.Interface() case reflect.Slice: + previous = current if part == "#" { current = cv.Len() } else { @@ -206,6 +210,14 @@ func (c *ResourceConfig) get( } current = cv.Index(int(i)).Interface() } + case reflect.String: + // This happens when map keys contain "." and have a common + // prefix so were split as path components above. 
+ actualKey := strings.Join(parts[i-1:], ".") + if prevMap, ok := previous.(map[string]interface{}); ok { + return prevMap[actualKey], true + } + return nil, false default: panic(fmt.Sprintf("Unknown kind: %s", cv.Kind())) } From 5fc1b6870a2bd0223281b650b6f93add24ba7fb5 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 14 Jul 2016 13:32:00 -0600 Subject: [PATCH 0285/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 159b9c572..d36b889a0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -205,6 +205,7 @@ BUG FIXES: * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources [GH-7308] * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion [GH-7584] * provider/azurerm: catch `azurerm_template_deployment` erroring silently [GH-7644] + * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource [GH-7646] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] From bc5a8b827f9547c5ec247d2f517bf5c51657177e Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Fri, 15 Jul 2016 02:47:11 -0600 Subject: [PATCH 0286/1238] provider/openstack: Support Import of OpenStack Networking Resources (#7661) Router-based resources are not included. They will be added later. --- ...openstack_networking_floatingip_v2_test.go | 29 ++++++ ...rt_openstack_networking_network_v2_test.go | 29 ++++++ ...mport_openstack_networking_port_v2_test.go | 29 ++++++ ...nstack_networking_secgroup_rule_v2_test.go | 29 ++++++ ...t_openstack_networking_secgroup_v2_test.go | 29 ++++++ ...ort_openstack_networking_subnet_v2_test.go | 29 ++++++ ...urce_openstack_networking_floatingip_v2.go | 3 + ...esource_openstack_networking_network_v2.go | 3 + .../resource_openstack_networking_port_v2.go | 3 + ...ource_openstack_networking_port_v2_test.go | 97 ++++++++----------- ...e_openstack_networking_secgroup_rule_v2.go | 6 ++ ...source_openstack_networking_secgroup_v2.go | 4 + ...resource_openstack_networking_subnet_v2.go | 5 + 13 files changed, 241 insertions(+), 54 deletions(-) create mode 100644 builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go create mode 100644 builtin/providers/openstack/import_openstack_networking_network_v2_test.go create mode 100644 builtin/providers/openstack/import_openstack_networking_port_v2_test.go create mode 100644 builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go create mode 100644 builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go create mode 100644 builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go diff --git a/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go b/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go new file mode 100644 index 000000000..8ce6fec97 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_floatingip_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingFloatingIPV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_floatingip_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2FloatingIP_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_networking_network_v2_test.go b/builtin/providers/openstack/import_openstack_networking_network_v2_test.go new file mode 100644 index 000000000..4c6faab82 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_network_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingNetworkV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_network_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Network_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_networking_port_v2_test.go b/builtin/providers/openstack/import_openstack_networking_port_v2_test.go new file mode 100644 index 000000000..55c21e128 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_port_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingPortV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_port_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2PortDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Port_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go b/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go new file mode 100644 index 000000000..8c3726b53 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_secgroup_rule_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingSecGroupRuleV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_secgroup_rule_v2.sr_foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2SecGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2SecGroupRule_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go b/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go new file mode 100644 
index 000000000..b5afdc01f --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_secgroup_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingSecGroupV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_secgroup_v2.foo" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2SecGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2SecGroup_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go b/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go new file mode 100644 index 000000000..87d30f200 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_networking_subnet_v2_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackNetworkingSubnetV2_importBasic(t *testing.T) { + resourceName := "openstack_networking_subnet_v2.subnet_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2SubnetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Subnet_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go index 06af02e31..f08371970 100644 --- a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go @@ -20,6 +20,9 @@ func resourceNetworkingFloatingIPV2() *schema.Resource { Read: resourceNetworkFloatingIPV2Read, Update: resourceNetworkFloatingIPV2Update, Delete: resourceNetworkFloatingIPV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2.go b/builtin/providers/openstack/resource_openstack_networking_network_v2.go index 4c3f4da17..8fbd832c7 100644 --- a/builtin/providers/openstack/resource_openstack_networking_network_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_network_v2.go @@ -19,6 +19,9 @@ func resourceNetworkingNetworkV2() *schema.Resource { Read: resourceNetworkingNetworkV2Read, Update: resourceNetworkingNetworkV2Update, Delete: resourceNetworkingNetworkV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2.go b/builtin/providers/openstack/resource_openstack_networking_port_v2.go index 73fa8da45..39ea5aaad 100644 --- a/builtin/providers/openstack/resource_openstack_networking_port_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_port_v2.go @@ -18,6 +18,9 @@ func 
resourceNetworkingPortV2() *schema.Resource { Read: resourceNetworkingPortV2Read, Update: resourceNetworkingPortV2Update, Delete: resourceNetworkingPortV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go index aa90e190d..1bb79e7f4 100644 --- a/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_networking_port_v2_test.go @@ -2,7 +2,6 @@ package openstack import ( "fmt" - "os" "testing" "github.com/hashicorp/terraform/helper/resource" @@ -14,38 +13,10 @@ import ( ) func TestAccNetworkingV2Port_basic(t *testing.T) { - region := os.Getenv(OS_REGION_NAME) - var network networks.Network var port ports.Port var subnet subnets.Subnet - var testAccNetworkingV2Port_basic = fmt.Sprintf(` - resource "openstack_networking_network_v2" "foo" { - region = "%s" - name = "network_1" - admin_state_up = "true" - } - - resource "openstack_networking_subnet_v2" "foo" { - region = "%s" - name = "subnet_1" - network_id = "${openstack_networking_network_v2.foo.id}" - cidr = "192.168.199.0/24" - ip_version = 4 - } - - resource "openstack_networking_port_v2" "foo" { - region = "%s" - name = "port_1" - network_id = "${openstack_networking_network_v2.foo.id}" - admin_state_up = "true" - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.foo.id}" - ip_address = "192.168.199.23" - } - }`, region, region, region) - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -64,35 +35,10 @@ func TestAccNetworkingV2Port_basic(t *testing.T) { } func TestAccNetworkingV2Port_noip(t *testing.T) { - region := os.Getenv(OS_REGION_NAME) - var network networks.Network var port ports.Port var subnet subnets.Subnet - var testAccNetworkingV2Port_noip = fmt.Sprintf(` - resource "openstack_networking_network_v2" "foo" { - region = "%s" - name = "network_1" - admin_state_up = "true" - } - resource "openstack_networking_subnet_v2" "foo" { - region = "%s" - name = "subnet_1" - network_id = "${openstack_networking_network_v2.foo.id}" - cidr = "192.168.199.0/24" - ip_version = 4 - } - resource "openstack_networking_port_v2" "foo" { - region = "%s" - name = "port_1" - network_id = "${openstack_networking_network_v2.foo.id}" - admin_state_up = "true" - fixed_ip { - subnet_id = "${openstack_networking_subnet_v2.foo.id}" - } - }`, region, region, region) - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -162,3 +108,46 @@ func testAccCheckNetworkingV2PortExists(t *testing.T, n string, port *ports.Port return nil } } + +var testAccNetworkingV2Port_basic = fmt.Sprintf(` + resource "openstack_networking_network_v2" "foo" { + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "foo" { + name = "subnet_1" + network_id = "${openstack_networking_network_v2.foo.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_networking_port_v2" "foo" { + name = "port_1" + network_id = "${openstack_networking_network_v2.foo.id}" + admin_state_up = "true" + fixed_ip { + subnet_id = "${openstack_networking_subnet_v2.foo.id}" + ip_address = "192.168.199.23" + } + }`) + +var testAccNetworkingV2Port_noip = fmt.Sprintf(` + resource 
"openstack_networking_network_v2" "foo" { + name = "network_1" + admin_state_up = "true" + } + resource "openstack_networking_subnet_v2" "foo" { + name = "subnet_1" + network_id = "${openstack_networking_network_v2.foo.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + resource "openstack_networking_port_v2" "foo" { + name = "port_1" + network_id = "${openstack_networking_network_v2.foo.id}" + admin_state_up = "true" + fixed_ip { + subnet_id = "${openstack_networking_subnet_v2.foo.id}" + } + }`) diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go index 929b839f0..396d836b8 100644 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_secgroup_rule_v2.go @@ -18,6 +18,9 @@ func resourceNetworkingSecGroupRuleV2() *schema.Resource { Create: resourceNetworkingSecGroupRuleV2Create, Read: resourceNetworkingSecGroupRuleV2Read, Delete: resourceNetworkingSecGroupRuleV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -143,11 +146,14 @@ func resourceNetworkingSecGroupRuleV2Read(d *schema.ResourceData, meta interface return CheckDeleted(d, err, "OpenStack Security Group Rule") } + d.Set("direction", security_group_rule.Direction) + d.Set("ethertype", security_group_rule.EtherType) d.Set("protocol", security_group_rule.Protocol) d.Set("port_range_min", security_group_rule.PortRangeMin) d.Set("port_range_max", security_group_rule.PortRangeMax) d.Set("remote_group_id", security_group_rule.RemoteGroupID) d.Set("remote_ip_prefix", security_group_rule.RemoteIPPrefix) + d.Set("security_group_id", security_group_rule.SecGroupID) d.Set("tenant_id", security_group_rule.TenantID) return nil } diff --git a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go index f08e9affc..26ddca40d 100644 --- a/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_secgroup_v2.go @@ -17,6 +17,9 @@ func resourceNetworkingSecGroupV2() *schema.Resource { Create: resourceNetworkingSecGroupV2Create, Read: resourceNetworkingSecGroupV2Read, Delete: resourceNetworkingSecGroupV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -91,6 +94,7 @@ func resourceNetworkingSecGroupV2Read(d *schema.ResourceData, meta interface{}) d.Set("description", security_group.Description) d.Set("tenant_id", security_group.TenantID) + d.Set("name", security_group.Name) return nil } diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go index ee119313d..2ef42c78c 100644 --- a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go @@ -18,6 +18,9 @@ func resourceNetworkingSubnetV2() *schema.Resource { Read: resourceNetworkingSubnetV2Read, Update: resourceNetworkingSubnetV2Update, Delete: resourceNetworkingSubnetV2Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -190,6 
+193,8 @@ func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) er d.Set("gateway_ip", s.GatewayIP) d.Set("dns_nameservers", s.DNSNameservers) d.Set("host_routes", s.HostRoutes) + d.Set("enable_dhcp", s.EnableDHCP) + d.Set("network_id", s.NetworkID) return nil } From f7da2612946fed4a8a593d1bafc368a6c0d87a2f Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Fri, 15 Jul 2016 02:49:31 -0600 Subject: [PATCH 0287/1238] provider/openstack: Support Import of OpenStack LBaaS V1 Resources (#7660) --- .../import_openstack_lb_member_v1_test.go | 29 +++++++++++++++++++ .../import_openstack_lb_monitor_v1_test.go | 29 +++++++++++++++++++ .../import_openstack_lb_pool_v1_test.go | 29 +++++++++++++++++++ .../import_openstack_lb_vip_v1_test.go | 29 +++++++++++++++++++ .../resource_openstack_lb_member_v1.go | 6 ++++ .../resource_openstack_lb_monitor_v1.go | 3 ++ .../resource_openstack_lb_pool_v1.go | 3 ++ .../openstack/resource_openstack_lb_vip_v1.go | 13 +++++++++ 8 files changed, 141 insertions(+) create mode 100644 builtin/providers/openstack/import_openstack_lb_member_v1_test.go create mode 100644 builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go create mode 100644 builtin/providers/openstack/import_openstack_lb_pool_v1_test.go create mode 100644 builtin/providers/openstack/import_openstack_lb_vip_v1_test.go diff --git a/builtin/providers/openstack/import_openstack_lb_member_v1_test.go b/builtin/providers/openstack/import_openstack_lb_member_v1_test.go new file mode 100644 index 000000000..5d5572561 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_lb_member_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackLBMemberV1_importBasic(t *testing.T) { + resourceName := "openstack_lb_member_v1.member_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1MemberDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1Member_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go b/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go new file mode 100644 index 000000000..a6cf8a319 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_lb_monitor_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackLBMonitorV1_importBasic(t *testing.T) { + resourceName := "openstack_lb_monitor_v1.monitor_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1MonitorDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1Monitor_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go new file mode 100644 index 000000000..5df55443f --- /dev/null +++ b/builtin/providers/openstack/import_openstack_lb_pool_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + 
"testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackLBPoolV1_importBasic(t *testing.T) { + resourceName := "openstack_lb_pool_v1.pool_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1PoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1Pool_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go b/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go new file mode 100644 index 000000000..7688543a3 --- /dev/null +++ b/builtin/providers/openstack/import_openstack_lb_vip_v1_test.go @@ -0,0 +1,29 @@ +package openstack + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccOpenStackLBVIPV1_importBasic(t *testing.T) { + resourceName := "openstack_lb_vip_v1.vip_1" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1VIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1VIP_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"region"}, + }, + }, + }) +} diff --git a/builtin/providers/openstack/resource_openstack_lb_member_v1.go b/builtin/providers/openstack/resource_openstack_lb_member_v1.go index d6d467c13..149a8ccb9 100644 --- a/builtin/providers/openstack/resource_openstack_lb_member_v1.go +++ b/builtin/providers/openstack/resource_openstack_lb_member_v1.go @@ -18,6 +18,9 @@ func resourceLBMemberV1() *schema.Resource { Read: resourceLBMemberV1Read, Update: resourceLBMemberV1Update, Delete: resourceLBMemberV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -128,6 +131,9 @@ func resourceLBMemberV1Read(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Retreived OpenStack LB member %s: %+v", d.Id(), m) + d.Set("address", m.Address) + d.Set("pool_id", m.PoolID) + d.Set("port", m.ProtocolPort) d.Set("weight", m.Weight) d.Set("admin_state_up", m.AdminStateUp) diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go index 71ace9286..4955e1d09 100644 --- a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go +++ b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go @@ -19,6 +19,9 @@ func resourceLBMonitorV1() *schema.Resource { Read: resourceLBMonitorV1Read, Update: resourceLBMonitorV1Update, Delete: resourceLBMonitorV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go index d55ede7ee..483fa892f 100644 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go @@ -22,6 +22,9 @@ func resourceLBPoolV1() *schema.Resource { Read: resourceLBPoolV1Read, Update: resourceLBPoolV1Update, Delete: resourceLBPoolV1Delete, + Importer: 
&schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go index 3bbcba56d..b2fb63c9e 100644 --- a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go +++ b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go @@ -18,6 +18,9 @@ func resourceLBVipV1() *schema.Resource { Read: resourceLBVipV1Read, Update: resourceLBVipV1Update, Delete: resourceLBVipV1Delete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "region": &schema.Schema{ @@ -182,6 +185,16 @@ func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error { d.Set("conn_limit", p.ConnLimit) d.Set("admin_state_up", p.AdminStateUp) + // Set the persistence method being used + persistence := make(map[string]interface{}) + if p.Persistence.Type != "" { + persistence["type"] = p.Persistence.Type + } + if p.Persistence.CookieName != "" { + persistence["cookie_name"] = p.Persistence.CookieName + } + d.Set("persistence", persistence) + return nil } From 31297f1c9b13432345574c28b75923cbff881f02 Mon Sep 17 00:00:00 2001 From: Christoph Blecker Date: Fri, 15 Jul 2016 04:56:48 -0700 Subject: [PATCH 0288/1238] [Vagrantfile] set resources for the provider 'parallels' (#7659) --- Vagrantfile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Vagrantfile b/Vagrantfile index 3e9d89dda..bfcbcf5fa 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -60,4 +60,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| v.memory = 4096 v.cpus = 2 end + + config.vm.provider "parallels" do |prl| + prl.memory = 4096 + prl.cpus = 2 + end end From f262566f77d723051259a8b4eda9b319b7bb5c73 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 15 Jul 2016 14:49:02 +0100 Subject: [PATCH 0289/1238] provider/aws: Bump the SDK version to v1.2.5 (#7638) This will allow for new changes to the ECS --- .../github.com/aws/aws-sdk-go/aws/config.go | 7 +- .../aws/aws-sdk-go/aws/convert_types.go | 24 +- .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/credentials/static_provider.go | 2 +- .../aws/aws-sdk-go/aws/defaults/defaults.go | 44 +- .../aws-sdk-go/aws/request/http_request.go | 19 +- .../aws/request/http_request_1_4.go | 19 +- .../aws/aws-sdk-go/aws/request/request.go | 19 +- .../signer/v4/header_rules.go | 0 .../aws/aws-sdk-go/aws/signer/v4/v4.go | 644 ++ .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws/aws-sdk-go/private/signer/v4/v4.go | 465 -- .../aws/aws-sdk-go/service/apigateway/api.go | 1951 ++++- .../aws-sdk-go/service/apigateway/service.go | 4 +- .../aws/aws-sdk-go/service/autoscaling/api.go | 1263 ++- .../aws-sdk-go/service/autoscaling/service.go | 4 +- .../aws-sdk-go/service/cloudformation/api.go | 773 +- .../service/cloudformation/service.go | 6 +- .../aws/aws-sdk-go/service/cloudfront/api.go | 574 +- .../aws-sdk-go/service/cloudfront/service.go | 4 +- .../aws/aws-sdk-go/service/cloudtrail/api.go | 478 +- .../aws-sdk-go/service/cloudtrail/service.go | 21 +- .../aws/aws-sdk-go/service/cloudwatch/api.go | 304 +- .../aws-sdk-go/service/cloudwatch/service.go | 4 +- .../aws-sdk-go/service/cloudwatch/waiters.go | 30 + .../service/cloudwatchevents/api.go | 276 +- .../service/cloudwatchevents/service.go | 4 +- .../aws-sdk-go/service/cloudwatchlogs/api.go | 694 +- .../service/cloudwatchlogs/service.go | 4 +- 
.../aws/aws-sdk-go/service/codecommit/api.go | 379 +- .../aws-sdk-go/service/codecommit/service.go | 4 +- .../aws/aws-sdk-go/service/codedeploy/api.go | 907 ++- .../aws-sdk-go/service/codedeploy/service.go | 4 +- .../service/directoryservice/api.go | 1057 ++- .../service/directoryservice/service.go | 4 +- .../aws/aws-sdk-go/service/dynamodb/api.go | 2022 +++-- .../aws-sdk-go/service/dynamodb/service.go | 38 +- .../aws/aws-sdk-go/service/ec2/api.go | 7137 ++++++++++++++--- .../aws/aws-sdk-go/service/ec2/service.go | 6 +- .../aws/aws-sdk-go/service/ec2/waiters.go | 29 + .../aws/aws-sdk-go/service/ecr/api.go | 368 +- .../aws/aws-sdk-go/service/ecr/service.go | 4 +- .../aws/aws-sdk-go/service/ecs/api.go | 979 ++- .../aws/aws-sdk-go/service/ecs/service.go | 4 +- .../aws/aws-sdk-go/service/efs/api.go | 735 +- .../aws/aws-sdk-go/service/efs/service.go | 4 +- .../aws/aws-sdk-go/service/elasticache/api.go | 1953 ++++- .../aws-sdk-go/service/elasticache/service.go | 4 +- .../service/elasticbeanstalk/api.go | 845 +- .../service/elasticbeanstalk/service.go | 4 +- .../service/elasticsearchservice/api.go | 230 +- .../service/elasticsearchservice/service.go | 4 +- .../service/elastictranscoder/api.go | 459 +- .../service/elastictranscoder/service.go | 4 +- .../aws/aws-sdk-go/service/elb/api.go | 661 +- .../aws/aws-sdk-go/service/elb/service.go | 4 +- .../aws/aws-sdk-go/service/emr/api.go | 1022 ++- .../aws-sdk-go/service/emr/examples_test.go | 617 -- .../aws/aws-sdk-go/service/emr/service.go | 4 +- .../aws/aws-sdk-go/service/firehose/api.go | 251 +- .../aws-sdk-go/service/firehose/service.go | 4 +- .../aws/aws-sdk-go/service/glacier/api.go | 781 +- .../aws/aws-sdk-go/service/glacier/service.go | 4 +- .../aws/aws-sdk-go/service/iam/api.go | 5024 ++++++++++-- .../aws/aws-sdk-go/service/iam/service.go | 26 +- .../aws/aws-sdk-go/service/kinesis/api.go | 425 +- .../aws/aws-sdk-go/service/kinesis/service.go | 4 +- .../aws/aws-sdk-go/service/kms/api.go | 1203 ++- .../aws/aws-sdk-go/service/kms/service.go | 46 +- .../aws/aws-sdk-go/service/lambda/api.go | 586 +- .../aws/aws-sdk-go/service/lambda/service.go | 4 +- .../aws/aws-sdk-go/service/opsworks/api.go | 2443 +++++- .../aws-sdk-go/service/opsworks/service.go | 43 +- .../aws-sdk-go/service/opsworks/waiters.go | 65 + .../aws/aws-sdk-go/service/rds/api.go | 4717 +++++++++-- .../aws/aws-sdk-go/service/rds/service.go | 24 +- .../aws/aws-sdk-go/service/redshift/api.go | 1681 +++- .../aws-sdk-go/service/redshift/service.go | 4 +- .../aws/aws-sdk-go/service/route53/api.go | 1407 +++- .../aws/aws-sdk-go/service/route53/service.go | 4 +- .../aws/aws-sdk-go/service/s3/api.go | 1620 +++- .../aws/aws-sdk-go/service/s3/service.go | 4 +- .../aws/aws-sdk-go/service/ses/api.go | 2835 ++++++- .../aws/aws-sdk-go/service/ses/service.go | 6 +- .../aws/aws-sdk-go/service/sns/api.go | 1531 +++- .../aws/aws-sdk-go/service/sns/service.go | 4 +- .../aws/aws-sdk-go/service/sqs/api.go | 698 +- .../aws/aws-sdk-go/service/sqs/checksums.go | 12 +- .../aws/aws-sdk-go/service/sqs/service.go | 29 +- .../aws/aws-sdk-go/service/sts/api.go | 165 +- .../aws/aws-sdk-go/service/sts/service.go | 4 +- vendor/vendor.json | 268 +- 92 files changed, 46076 insertions(+), 7168 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go rename vendor/github.com/aws/aws-sdk-go/{private => aws}/signer/v4/header_rules.go (100%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index bfaa15203..da72935be 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -139,13 +139,18 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. SleepDelay func(time.Duration) } // NewConfig returns a new Config pointer that can be chained with builder methods to // set multiple configuration values inline without using pointers. // -// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) // func NewConfig() *Config { return &Config{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index d6a7b08df..3b73a7da7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -2,7 +2,7 @@ package aws import "time" -// String returns a pointer to of the string value passed in. +// String returns a pointer to the string value passed in. func String(v string) *string { return &v } @@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string { return dst } -// Bool returns a pointer to of the bool value passed in. +// Bool returns a pointer to the bool value passed in. func Bool(v bool) *bool { return &v } @@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool { return dst } -// Int returns a pointer to of the int value passed in. +// Int returns a pointer to the int value passed in. func Int(v int) *int { return &v } @@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Int64 returns a pointer to of the int64 value passed in. +// Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v } @@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Float64 returns a pointer to of the float64 value passed in. +// Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v } @@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 { return dst } -// Time returns a pointer to of the time.Time value passed in. +// Time returns a pointer to the time.Time value passed in. func Time(v time.Time) *time.Time { return &v } @@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. 
+// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. +func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + // TimeSlice converts a slice of time.Time values into a slice of // time.Time pointers func TimeSlice(src []time.Time) []*time.Time { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 000000000..a4cec5c55 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,191 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. +const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. 
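+//
+// A minimal usage sketch (the endpoint URL and the session value "sess" are
+// assumptions, not part of this package):
+//
+//     p := endpointcreds.NewProviderClient(*sess.Config, sess.Handlers,
+//         "http://169.254.170.2/v2/credentials",
+//         func(p *endpointcreds.Provider) { p.ExpiryWindow = 5 * time.Minute },
+//     )
+//     creds := credentials.NewCredentials(p)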
+func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. The client will request the +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. +func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } + + // Response body format is not consistent between metadata endpoints. 
+ // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 71189e733..6f075604e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -14,7 +14,7 @@ var ( ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) -// A StaticProvider is a set of credentials which are set pragmatically, +// A StaticProvider is a set of credentials which are set programmatically, // and will never expire. type StaticProvider struct { Value diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 043960d3f..570417ffa 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -8,6 +8,7 @@ package defaults import ( + "fmt" "net/http" "os" "time" @@ -16,6 +17,7 @@ import ( "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" "github.com/aws/aws-sdk-go/aws/ec2metadata" "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/private/endpoints" @@ -66,6 +68,7 @@ func Handlers() request.Handlers { var handlers request.Handlers handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) handlers.Build.AfterEachFn = request.HandlerListStopOnError handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) @@ -82,16 +85,43 @@ func Handlers() request.Handlers { // is available if you need to reset the credentials of an // existing service client or session's Config. 
func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) - return credentials.NewCredentials(&credentials.ChainProvider{ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), Providers: []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), - ExpiryWindow: 5 * time.Minute, - }, - }}) + remoteCredProvider(*cfg, handlers), + }, + }) +} + +func remoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") + + if len(ecsCredURI) > 0 { + return ecsCredProvider(cfg, handlers, ecsCredURI) + } + + return ec2RoleProvider(cfg, handlers) +} + +func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider { + const host = `169.254.170.2` + + return endpointcreds.NewProviderClient(cfg, handlers, + fmt.Sprintf("http://%s%s", host, uri), + func(p *endpointcreds.Provider) { + p.ExpiryWindow = 5 * time.Minute + }, + ) +} + +func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { + endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, + aws.StringValue(cfg.Region), true) + + return &ec2rolecreds.EC2RoleProvider{ + Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), + ExpiryWindow: 5 * time.Minute, + } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go index 3127762f4..a4087f20e 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -5,17 +5,15 @@ package request import ( "io" "net/http" + "net/url" ) func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - return &http.Request{ - URL: r.URL, - Header: r.Header, + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, Close: r.Close, - Form: r.Form, - PostForm: r.PostForm, Body: body, - MultipartForm: r.MultipartForm, Host: r.Host, Method: r.Method, Proto: r.Proto, @@ -23,4 +21,13 @@ func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { // Cancel will be deprecated in 1.7 and will be replaced with Context Cancel: r.Cancel, } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go index 86c9c2f77..75da021ef 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go @@ -5,20 +5,27 @@ package request import ( "io" "net/http" + "net/url" ) func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - return &http.Request{ - URL: r.URL, - Header: r.Header, + req := &http.Request{ + URL: &url.URL{}, + Header: http.Header{}, Close: r.Close, - Form: r.Form, - PostForm: r.PostForm, Body: body, - MultipartForm: r.MultipartForm, Host: r.Host, Method: r.Method, Proto: r.Proto, ContentLength: r.ContentLength, } + + *req.URL = *r.URL + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return 
req } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go index 2391632f1..2832aaa43 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -12,6 +12,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client/metadata" ) @@ -38,6 +39,7 @@ type Request struct { RetryDelay time.Duration NotHoist bool SignedHeaderVals http.Header + LastSignedAt time.Time built bool } @@ -71,13 +73,15 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, if method == "" { method = "POST" } - p := operation.HTTPPath - if p == "" { - p = "/" - } httpReq, _ := http.NewRequest(method, "", nil) - httpReq.URL, _ = url.Parse(clientInfo.Endpoint + p) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } r := &Request{ Config: cfg, @@ -91,7 +95,7 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, HTTPRequest: httpReq, Body: nil, Params: params, - Error: nil, + Error: err, Data: data, } r.SetBufferBody([]byte{}) @@ -185,7 +189,6 @@ func debugLogReqError(r *Request, stage string, retrying bool, err error) { // which occurred will be returned. func (r *Request) Build() error { if !r.built { - r.Error = nil r.Handlers.Validate.Run(r) if r.Error != nil { debugLogReqError(r, "Validate Request", false, r.Error) @@ -202,7 +205,7 @@ func (r *Request) Build() error { return r.Error } -// Sign will sign the request retuning error if errors are encountered. +// Sign will sign the request returning error if errors are encountered. // // Send will build the request prior to signing. All Sign Handlers will // be executed in the order they were set. diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go similarity index 100% rename from vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go rename to vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 000000000..f040f9ce9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,644 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. 
+var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. 
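+//
+// A brief usage sketch (creds, req, and body are assumed to already exist):
+//
+//     signer := v4.NewSigner(creds, func(s *v4.Signer) {
+//         s.DisableHeaderHoisting = true
+//     })
+//     _, err := signer.Sign(req, body, "s3", "us-west-2", time.Now())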
+func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. 
If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. +func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + } + + if ctx.isRequestSigned() { + if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) { + // If the request is already signed, and the credentials have not + // expired, and the request is not too old ignore the signing request. + return ctx.SignedHeaderVals, nil + } + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. 
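+ // (AnonymousCredentials is the SDK's sentinel credentials value for
+ // requests that should be sent unsigned.)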
+ if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + }) + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signiture: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx 
*signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + uri := ctx.Request.URL.Opaque + if uri != "" { + uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") + } else { + uri = ctx.Request.URL.Path + } + if uri == "" { + uri = "/" + } + + if ctx.ServiceName != "s3" { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.isPresign && ctx.ServiceName == "s3" { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.ServiceName == "s3" { + ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. 
+func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 819485663..97a3f57f5 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.1.23" +const SDKVersion = "1.2.5" diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go deleted file mode 100644 index 476580056..000000000 --- a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go +++ /dev/null @@ -1,465 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" -) - -var ignoredHeaders = rules{ - blacklist{ - mapRule{ - "Authorization": struct{}{}, - "User-Agent": struct{}{}, - }, - }, -} - -// requiredSignedHeaders is a whitelist for build canonical headers. 
-var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string - notHoist bool - signedHeaderVals http.Header -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *request.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. 
- if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - s := signer{ - Request: req.HTTPRequest, - Time: req.Time, - ExpireTime: req.ExpireTime, - Query: req.HTTPRequest.URL.Query(), - Body: req.Body, - ServiceName: name, - Region: region, - Credentials: req.Config.Credentials, - Debug: req.Config.LogLevel.Value(), - Logger: req.Config.Logger, - notHoist: req.NotHoist, - } - - req.Error = s.sign() - req.Time = s.Time - req.SignedHeaderVals = s.signedHeaderVals -} - -func (v4 *signer) sign() error { - if v4.ExpireTime != 0 { - v4.isPresign = true - } - - if v4.isRequestSigned() { - if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) { - // If the request is already signed, and the credentials have not - // expired, and the request is not too old ignore the signing request. - return nil - } - v4.Time = time.Now() - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - if v4.isPresign { - v4.removePresign() - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - v4.Request.URL.RawQuery = v4.Query.Encode() - } - } - - var err error - v4.CredValues, err = v4.Credentials.Get() - if err != nil { - return err - } - - if v4.isPresign { - v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if v4.CredValues.SessionToken != "" { - v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } else { - v4.Query.Del("X-Amz-Security-Token") - } - } else if v4.CredValues.SessionToken != "" { - v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } - - v4.build() - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo() - } - - return nil -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *signer) logSigningInfo() { - signedURLMsg := "" - if v4.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (v4 *signer) build() { - - v4.buildTime() // no depends - v4.buildCredentialString() // no depends - - unsignedHeaders := v4.Request.Header - if v4.isPresign { - if !v4.notHoist { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - v4.Query[k] = urlValues[k] - } - } - } - - v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - v4.buildCanonicalString() // depends on canon headers / signed headers - v4.buildStringToSign() // depends on canon string - v4.buildSignature() // depends on string to sign - - if v4.isPresign { - v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, - "SignedHeaders=" + v4.signedHeaders, - "Signature=" + v4.signature, - } - 
v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if v4.signedHeaderVals == nil { - v4.signedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok { - // include additional values - v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - v4.signedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.signedHeaderVals[k], ",") - } - } - - v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && 
v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - stripped := "" - found := false - str = strings.TrimSpace(str) - for _, c := range str { - if !found && c == ' ' { - stripped += string(c) - found = true - } else if c != ' ' { - stripped += string(c) - found = false - } - } - vals[i] = stripped - } - return vals -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go index 698647d13..4755978d7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go @@ -14,7 +14,28 @@ import ( const opCreateApiKey = "CreateApiKey" -// CreateApiKeyRequest generates a request for the CreateApiKey operation. +// CreateApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApiKeyRequest method. +// req, resp := client.CreateApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateApiKeyRequest(input *CreateApiKeyInput) (req *request.Request, output *ApiKey) { op := &request.Operation{ Name: opCreateApiKey, @@ -41,7 +62,28 @@ func (c *APIGateway) CreateApiKey(input *CreateApiKeyInput) (*ApiKey, error) { const opCreateAuthorizer = "CreateAuthorizer" -// CreateAuthorizerRequest generates a request for the CreateAuthorizer operation. 
+// CreateAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the CreateAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAuthorizerRequest method. +// req, resp := client.CreateAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateAuthorizerRequest(input *CreateAuthorizerInput) (req *request.Request, output *Authorizer) { op := &request.Operation{ Name: opCreateAuthorizer, @@ -68,7 +110,28 @@ func (c *APIGateway) CreateAuthorizer(input *CreateAuthorizerInput) (*Authorizer const opCreateBasePathMapping = "CreateBasePathMapping" -// CreateBasePathMappingRequest generates a request for the CreateBasePathMapping operation. +// CreateBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the CreateBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBasePathMappingRequest method. +// req, resp := client.CreateBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateBasePathMappingRequest(input *CreateBasePathMappingInput) (req *request.Request, output *BasePathMapping) { op := &request.Operation{ Name: opCreateBasePathMapping, @@ -95,7 +158,28 @@ func (c *APIGateway) CreateBasePathMapping(input *CreateBasePathMappingInput) (* const opCreateDeployment = "CreateDeployment" -// CreateDeploymentRequest generates a request for the CreateDeployment operation. +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *Deployment) { op := &request.Operation{ Name: opCreateDeployment, @@ -123,7 +207,28 @@ func (c *APIGateway) CreateDeployment(input *CreateDeploymentInput) (*Deployment const opCreateDomainName = "CreateDomainName" -// CreateDomainNameRequest generates a request for the CreateDomainName operation. +// CreateDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the CreateDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDomainNameRequest method. +// req, resp := client.CreateDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateDomainNameRequest(input *CreateDomainNameInput) (req *request.Request, output *DomainName) { op := &request.Operation{ Name: opCreateDomainName, @@ -150,7 +255,28 @@ func (c *APIGateway) CreateDomainName(input *CreateDomainNameInput) (*DomainName const opCreateModel = "CreateModel" -// CreateModelRequest generates a request for the CreateModel operation. +// CreateModelRequest generates a "aws/request.Request" representing the +// client's request for the CreateModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateModelRequest method. +// req, resp := client.CreateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateModelRequest(input *CreateModelInput) (req *request.Request, output *Model) { op := &request.Operation{ Name: opCreateModel, @@ -177,7 +303,28 @@ func (c *APIGateway) CreateModel(input *CreateModelInput) (*Model, error) { const opCreateResource = "CreateResource" -// CreateResourceRequest generates a request for the CreateResource operation. +// CreateResourceRequest generates a "aws/request.Request" representing the +// client's request for the CreateResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateResourceRequest method. +// req, resp := client.CreateResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateResourceRequest(input *CreateResourceInput) (req *request.Request, output *Resource) { op := &request.Operation{ Name: opCreateResource, @@ -204,7 +351,28 @@ func (c *APIGateway) CreateResource(input *CreateResourceInput) (*Resource, erro const opCreateRestApi = "CreateRestApi" -// CreateRestApiRequest generates a request for the CreateRestApi operation. +// CreateRestApiRequest generates a "aws/request.Request" representing the +// client's request for the CreateRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRestApiRequest method. +// req, resp := client.CreateRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateRestApiRequest(input *CreateRestApiInput) (req *request.Request, output *RestApi) { op := &request.Operation{ Name: opCreateRestApi, @@ -231,7 +399,28 @@ func (c *APIGateway) CreateRestApi(input *CreateRestApiInput) (*RestApi, error) const opCreateStage = "CreateStage" -// CreateStageRequest generates a request for the CreateStage operation. +// CreateStageRequest generates a "aws/request.Request" representing the +// client's request for the CreateStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStageRequest method. 
+// req, resp := client.CreateStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) CreateStageRequest(input *CreateStageInput) (req *request.Request, output *Stage) { op := &request.Operation{ Name: opCreateStage, @@ -259,7 +448,28 @@ func (c *APIGateway) CreateStage(input *CreateStageInput) (*Stage, error) { const opDeleteApiKey = "DeleteApiKey" -// DeleteApiKeyRequest generates a request for the DeleteApiKey operation. +// DeleteApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApiKeyRequest method. +// req, resp := client.DeleteApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteApiKeyRequest(input *DeleteApiKeyInput) (req *request.Request, output *DeleteApiKeyOutput) { op := &request.Operation{ Name: opDeleteApiKey, @@ -288,7 +498,28 @@ func (c *APIGateway) DeleteApiKey(input *DeleteApiKeyInput) (*DeleteApiKeyOutput const opDeleteAuthorizer = "DeleteAuthorizer" -// DeleteAuthorizerRequest generates a request for the DeleteAuthorizer operation. +// DeleteAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAuthorizerRequest method. +// req, resp := client.DeleteAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteAuthorizerRequest(input *DeleteAuthorizerInput) (req *request.Request, output *DeleteAuthorizerOutput) { op := &request.Operation{ Name: opDeleteAuthorizer, @@ -317,7 +548,28 @@ func (c *APIGateway) DeleteAuthorizer(input *DeleteAuthorizerInput) (*DeleteAuth const opDeleteBasePathMapping = "DeleteBasePathMapping" -// DeleteBasePathMappingRequest generates a request for the DeleteBasePathMapping operation. +// DeleteBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBasePathMappingRequest method. +// req, resp := client.DeleteBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteBasePathMappingRequest(input *DeleteBasePathMappingInput) (req *request.Request, output *DeleteBasePathMappingOutput) { op := &request.Operation{ Name: opDeleteBasePathMapping, @@ -346,7 +598,28 @@ func (c *APIGateway) DeleteBasePathMapping(input *DeleteBasePathMappingInput) (* const opDeleteClientCertificate = "DeleteClientCertificate" -// DeleteClientCertificateRequest generates a request for the DeleteClientCertificate operation. +// DeleteClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClientCertificateRequest method. +// req, resp := client.DeleteClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteClientCertificateRequest(input *DeleteClientCertificateInput) (req *request.Request, output *DeleteClientCertificateOutput) { op := &request.Operation{ Name: opDeleteClientCertificate, @@ -375,7 +648,28 @@ func (c *APIGateway) DeleteClientCertificate(input *DeleteClientCertificateInput const opDeleteDeployment = "DeleteDeployment" -// DeleteDeploymentRequest generates a request for the DeleteDeployment operation. +// DeleteDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentRequest method. 
+// req, resp := client.DeleteDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteDeploymentRequest(input *DeleteDeploymentInput) (req *request.Request, output *DeleteDeploymentOutput) { op := &request.Operation{ Name: opDeleteDeployment, @@ -405,7 +699,28 @@ func (c *APIGateway) DeleteDeployment(input *DeleteDeploymentInput) (*DeleteDepl const opDeleteDomainName = "DeleteDomainName" -// DeleteDomainNameRequest generates a request for the DeleteDomainName operation. +// DeleteDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDomainNameRequest method. +// req, resp := client.DeleteDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteDomainNameRequest(input *DeleteDomainNameInput) (req *request.Request, output *DeleteDomainNameOutput) { op := &request.Operation{ Name: opDeleteDomainName, @@ -434,7 +749,28 @@ func (c *APIGateway) DeleteDomainName(input *DeleteDomainNameInput) (*DeleteDoma const opDeleteIntegration = "DeleteIntegration" -// DeleteIntegrationRequest generates a request for the DeleteIntegration operation. +// DeleteIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIntegrationRequest method. +// req, resp := client.DeleteIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteIntegrationRequest(input *DeleteIntegrationInput) (req *request.Request, output *DeleteIntegrationOutput) { op := &request.Operation{ Name: opDeleteIntegration, @@ -463,7 +799,28 @@ func (c *APIGateway) DeleteIntegration(input *DeleteIntegrationInput) (*DeleteIn const opDeleteIntegrationResponse = "DeleteIntegrationResponse" -// DeleteIntegrationResponseRequest generates a request for the DeleteIntegrationResponse operation. +// DeleteIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIntegrationResponse operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIntegrationResponseRequest method. +// req, resp := client.DeleteIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteIntegrationResponseRequest(input *DeleteIntegrationResponseInput) (req *request.Request, output *DeleteIntegrationResponseOutput) { op := &request.Operation{ Name: opDeleteIntegrationResponse, @@ -492,7 +849,28 @@ func (c *APIGateway) DeleteIntegrationResponse(input *DeleteIntegrationResponseI const opDeleteMethod = "DeleteMethod" -// DeleteMethodRequest generates a request for the DeleteMethod operation. +// DeleteMethodRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMethodRequest method. +// req, resp := client.DeleteMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteMethodRequest(input *DeleteMethodInput) (req *request.Request, output *DeleteMethodOutput) { op := &request.Operation{ Name: opDeleteMethod, @@ -521,7 +899,28 @@ func (c *APIGateway) DeleteMethod(input *DeleteMethodInput) (*DeleteMethodOutput const opDeleteMethodResponse = "DeleteMethodResponse" -// DeleteMethodResponseRequest generates a request for the DeleteMethodResponse operation. +// DeleteMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMethodResponseRequest method. 
+// req, resp := client.DeleteMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteMethodResponseRequest(input *DeleteMethodResponseInput) (req *request.Request, output *DeleteMethodResponseOutput) { op := &request.Operation{ Name: opDeleteMethodResponse, @@ -550,7 +949,28 @@ func (c *APIGateway) DeleteMethodResponse(input *DeleteMethodResponseInput) (*De const opDeleteModel = "DeleteModel" -// DeleteModelRequest generates a request for the DeleteModel operation. +// DeleteModelRequest generates a "aws/request.Request" representing the +// client's request for the DeleteModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteModelRequest method. +// req, resp := client.DeleteModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteModelRequest(input *DeleteModelInput) (req *request.Request, output *DeleteModelOutput) { op := &request.Operation{ Name: opDeleteModel, @@ -579,7 +999,28 @@ func (c *APIGateway) DeleteModel(input *DeleteModelInput) (*DeleteModelOutput, e const opDeleteResource = "DeleteResource" -// DeleteResourceRequest generates a request for the DeleteResource operation. +// DeleteResourceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteResourceRequest method. +// req, resp := client.DeleteResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteResourceRequest(input *DeleteResourceInput) (req *request.Request, output *DeleteResourceOutput) { op := &request.Operation{ Name: opDeleteResource, @@ -608,7 +1049,28 @@ func (c *APIGateway) DeleteResource(input *DeleteResourceInput) (*DeleteResource const opDeleteRestApi = "DeleteRestApi" -// DeleteRestApiRequest generates a request for the DeleteRestApi operation. +// DeleteRestApiRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
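//
// --- Illustrative sketch (not part of the patch). When no access to the
// request object is needed, the comments above say to call the operation
// method directly instead. A minimal, hedged example of that simpler path for
// DeleteModel, reusing the apigateway client `svc` from the earlier sketch;
// the REST API id and model name are placeholder values and aws.String comes
// from "github.com/aws/aws-sdk-go/aws".
//
//	out, err := svc.DeleteModel(&apigateway.DeleteModelInput{
//		RestApiId: aws.String("placeholder-rest-api-id"),
//		ModelName: aws.String("PlaceholderModel"),
//	})
//	if err != nil {
//		log.Printf("DeleteModel failed: %v", err)
//		return
//	}
//	fmt.Println(out)
// ---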
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRestApiRequest method. +// req, resp := client.DeleteRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteRestApiRequest(input *DeleteRestApiInput) (req *request.Request, output *DeleteRestApiOutput) { op := &request.Operation{ Name: opDeleteRestApi, @@ -637,7 +1099,28 @@ func (c *APIGateway) DeleteRestApi(input *DeleteRestApiInput) (*DeleteRestApiOut const opDeleteStage = "DeleteStage" -// DeleteStageRequest generates a request for the DeleteStage operation. +// DeleteStageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStageRequest method. +// req, resp := client.DeleteStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) DeleteStageRequest(input *DeleteStageInput) (req *request.Request, output *DeleteStageOutput) { op := &request.Operation{ Name: opDeleteStage, @@ -666,7 +1149,28 @@ func (c *APIGateway) DeleteStage(input *DeleteStageInput) (*DeleteStageOutput, e const opFlushStageAuthorizersCache = "FlushStageAuthorizersCache" -// FlushStageAuthorizersCacheRequest generates a request for the FlushStageAuthorizersCache operation. +// FlushStageAuthorizersCacheRequest generates a "aws/request.Request" representing the +// client's request for the FlushStageAuthorizersCache operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FlushStageAuthorizersCache method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FlushStageAuthorizersCacheRequest method. 
+// req, resp := client.FlushStageAuthorizersCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) FlushStageAuthorizersCacheRequest(input *FlushStageAuthorizersCacheInput) (req *request.Request, output *FlushStageAuthorizersCacheOutput) { op := &request.Operation{ Name: opFlushStageAuthorizersCache, @@ -695,7 +1199,28 @@ func (c *APIGateway) FlushStageAuthorizersCache(input *FlushStageAuthorizersCach const opFlushStageCache = "FlushStageCache" -// FlushStageCacheRequest generates a request for the FlushStageCache operation. +// FlushStageCacheRequest generates a "aws/request.Request" representing the +// client's request for the FlushStageCache operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FlushStageCache method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FlushStageCacheRequest method. +// req, resp := client.FlushStageCacheRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) FlushStageCacheRequest(input *FlushStageCacheInput) (req *request.Request, output *FlushStageCacheOutput) { op := &request.Operation{ Name: opFlushStageCache, @@ -724,7 +1249,28 @@ func (c *APIGateway) FlushStageCache(input *FlushStageCacheInput) (*FlushStageCa const opGenerateClientCertificate = "GenerateClientCertificate" -// GenerateClientCertificateRequest generates a request for the GenerateClientCertificate operation. +// GenerateClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GenerateClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateClientCertificateRequest method. +// req, resp := client.GenerateClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GenerateClientCertificateRequest(input *GenerateClientCertificateInput) (req *request.Request, output *ClientCertificate) { op := &request.Operation{ Name: opGenerateClientCertificate, @@ -751,7 +1297,28 @@ func (c *APIGateway) GenerateClientCertificate(input *GenerateClientCertificateI const opGetAccount = "GetAccount" -// GetAccountRequest generates a request for the GetAccount operation. 
+// GetAccountRequest generates a "aws/request.Request" representing the +// client's request for the GetAccount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountRequest method. +// req, resp := client.GetAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetAccountRequest(input *GetAccountInput) (req *request.Request, output *Account) { op := &request.Operation{ Name: opGetAccount, @@ -778,7 +1345,28 @@ func (c *APIGateway) GetAccount(input *GetAccountInput) (*Account, error) { const opGetApiKey = "GetApiKey" -// GetApiKeyRequest generates a request for the GetApiKey operation. +// GetApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApiKeyRequest method. +// req, resp := client.GetApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetApiKeyRequest(input *GetApiKeyInput) (req *request.Request, output *ApiKey) { op := &request.Operation{ Name: opGetApiKey, @@ -805,7 +1393,28 @@ func (c *APIGateway) GetApiKey(input *GetApiKeyInput) (*ApiKey, error) { const opGetApiKeys = "GetApiKeys" -// GetApiKeysRequest generates a request for the GetApiKeys operation. +// GetApiKeysRequest generates a "aws/request.Request" representing the +// client's request for the GetApiKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApiKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApiKeysRequest method. 
+// req, resp := client.GetApiKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetApiKeysRequest(input *GetApiKeysInput) (req *request.Request, output *GetApiKeysOutput) { op := &request.Operation{ Name: opGetApiKeys, @@ -836,6 +1445,23 @@ func (c *APIGateway) GetApiKeys(input *GetApiKeysInput) (*GetApiKeysOutput, erro return out, err } +// GetApiKeysPages iterates over the pages of a GetApiKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetApiKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetApiKeys operation. +// pageNum := 0 +// err := client.GetApiKeysPages(params, +// func(page *GetApiKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetApiKeysPages(input *GetApiKeysInput, fn func(p *GetApiKeysOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetApiKeysRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -846,7 +1472,28 @@ func (c *APIGateway) GetApiKeysPages(input *GetApiKeysInput, fn func(p *GetApiKe const opGetAuthorizer = "GetAuthorizer" -// GetAuthorizerRequest generates a request for the GetAuthorizer operation. +// GetAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizerRequest method. +// req, resp := client.GetAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetAuthorizerRequest(input *GetAuthorizerInput) (req *request.Request, output *Authorizer) { op := &request.Operation{ Name: opGetAuthorizer, @@ -873,7 +1520,28 @@ func (c *APIGateway) GetAuthorizer(input *GetAuthorizerInput) (*Authorizer, erro const opGetAuthorizers = "GetAuthorizers" -// GetAuthorizersRequest generates a request for the GetAuthorizers operation. +// GetAuthorizersRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizers method directly +// instead. 
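//
// --- Illustrative sketch (not part of the patch). The Pages variants above
// handle pagination automatically; the callback decides whether to keep
// iterating. A minimal, hedged example that collects API key names across all
// pages, reusing the apigateway client `svc` from the earlier sketch and the
// Items / Name fields of the generated GetApiKeysOutput and ApiKey types.
//
//	var names []string
//	err := svc.GetApiKeysPages(&apigateway.GetApiKeysInput{},
//		func(page *apigateway.GetApiKeysOutput, lastPage bool) bool {
//			for _, key := range page.Items {
//				if key.Name != nil {
//					names = append(names, *key.Name)
//				}
//			}
//			return !lastPage // keep going until the final page
//		})
//	if err != nil {
//		log.Printf("GetApiKeys pagination failed: %v", err)
//	}
// ---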
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizersRequest method. +// req, resp := client.GetAuthorizersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetAuthorizersRequest(input *GetAuthorizersInput) (req *request.Request, output *GetAuthorizersOutput) { op := &request.Operation{ Name: opGetAuthorizers, @@ -900,7 +1568,28 @@ func (c *APIGateway) GetAuthorizers(input *GetAuthorizersInput) (*GetAuthorizers const opGetBasePathMapping = "GetBasePathMapping" -// GetBasePathMappingRequest generates a request for the GetBasePathMapping operation. +// GetBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBasePathMappingRequest method. +// req, resp := client.GetBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetBasePathMappingRequest(input *GetBasePathMappingInput) (req *request.Request, output *BasePathMapping) { op := &request.Operation{ Name: opGetBasePathMapping, @@ -927,7 +1616,28 @@ func (c *APIGateway) GetBasePathMapping(input *GetBasePathMappingInput) (*BasePa const opGetBasePathMappings = "GetBasePathMappings" -// GetBasePathMappingsRequest generates a request for the GetBasePathMappings operation. +// GetBasePathMappingsRequest generates a "aws/request.Request" representing the +// client's request for the GetBasePathMappings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBasePathMappings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBasePathMappingsRequest method. 
+// req, resp := client.GetBasePathMappingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetBasePathMappingsRequest(input *GetBasePathMappingsInput) (req *request.Request, output *GetBasePathMappingsOutput) { op := &request.Operation{ Name: opGetBasePathMappings, @@ -958,6 +1668,23 @@ func (c *APIGateway) GetBasePathMappings(input *GetBasePathMappingsInput) (*GetB return out, err } +// GetBasePathMappingsPages iterates over the pages of a GetBasePathMappings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetBasePathMappings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetBasePathMappings operation. +// pageNum := 0 +// err := client.GetBasePathMappingsPages(params, +// func(page *GetBasePathMappingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetBasePathMappingsPages(input *GetBasePathMappingsInput, fn func(p *GetBasePathMappingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetBasePathMappingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -968,7 +1695,28 @@ func (c *APIGateway) GetBasePathMappingsPages(input *GetBasePathMappingsInput, f const opGetClientCertificate = "GetClientCertificate" -// GetClientCertificateRequest generates a request for the GetClientCertificate operation. +// GetClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetClientCertificateRequest method. +// req, resp := client.GetClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetClientCertificateRequest(input *GetClientCertificateInput) (req *request.Request, output *ClientCertificate) { op := &request.Operation{ Name: opGetClientCertificate, @@ -995,7 +1743,28 @@ func (c *APIGateway) GetClientCertificate(input *GetClientCertificateInput) (*Cl const opGetClientCertificates = "GetClientCertificates" -// GetClientCertificatesRequest generates a request for the GetClientCertificates operation. +// GetClientCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the GetClientCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetClientCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetClientCertificatesRequest method. +// req, resp := client.GetClientCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetClientCertificatesRequest(input *GetClientCertificatesInput) (req *request.Request, output *GetClientCertificatesOutput) { op := &request.Operation{ Name: opGetClientCertificates, @@ -1026,6 +1795,23 @@ func (c *APIGateway) GetClientCertificates(input *GetClientCertificatesInput) (* return out, err } +// GetClientCertificatesPages iterates over the pages of a GetClientCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetClientCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetClientCertificates operation. +// pageNum := 0 +// err := client.GetClientCertificatesPages(params, +// func(page *GetClientCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetClientCertificatesPages(input *GetClientCertificatesInput, fn func(p *GetClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetClientCertificatesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1036,7 +1822,28 @@ func (c *APIGateway) GetClientCertificatesPages(input *GetClientCertificatesInpu const opGetDeployment = "GetDeployment" -// GetDeploymentRequest generates a request for the GetDeployment operation. +// GetDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentRequest method. 
+// req, resp := client.GetDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *Deployment) { op := &request.Operation{ Name: opGetDeployment, @@ -1063,7 +1870,28 @@ func (c *APIGateway) GetDeployment(input *GetDeploymentInput) (*Deployment, erro const opGetDeployments = "GetDeployments" -// GetDeploymentsRequest generates a request for the GetDeployments operation. +// GetDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentsRequest method. +// req, resp := client.GetDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetDeploymentsRequest(input *GetDeploymentsInput) (req *request.Request, output *GetDeploymentsOutput) { op := &request.Operation{ Name: opGetDeployments, @@ -1094,6 +1922,23 @@ func (c *APIGateway) GetDeployments(input *GetDeploymentsInput) (*GetDeployments return out, err } +// GetDeploymentsPages iterates over the pages of a GetDeployments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetDeployments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetDeployments operation. +// pageNum := 0 +// err := client.GetDeploymentsPages(params, +// func(page *GetDeploymentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetDeploymentsPages(input *GetDeploymentsInput, fn func(p *GetDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetDeploymentsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1104,7 +1949,28 @@ func (c *APIGateway) GetDeploymentsPages(input *GetDeploymentsInput, fn func(p * const opGetDomainName = "GetDomainName" -// GetDomainNameRequest generates a request for the GetDomainName operation. +// GetDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDomainName method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDomainNameRequest method. +// req, resp := client.GetDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetDomainNameRequest(input *GetDomainNameInput) (req *request.Request, output *DomainName) { op := &request.Operation{ Name: opGetDomainName, @@ -1132,7 +1998,28 @@ func (c *APIGateway) GetDomainName(input *GetDomainNameInput) (*DomainName, erro const opGetDomainNames = "GetDomainNames" -// GetDomainNamesRequest generates a request for the GetDomainNames operation. +// GetDomainNamesRequest generates a "aws/request.Request" representing the +// client's request for the GetDomainNames operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDomainNames method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDomainNamesRequest method. +// req, resp := client.GetDomainNamesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetDomainNamesRequest(input *GetDomainNamesInput) (req *request.Request, output *GetDomainNamesOutput) { op := &request.Operation{ Name: opGetDomainNames, @@ -1163,6 +2050,23 @@ func (c *APIGateway) GetDomainNames(input *GetDomainNamesInput) (*GetDomainNames return out, err } +// GetDomainNamesPages iterates over the pages of a GetDomainNames operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetDomainNames method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetDomainNames operation. +// pageNum := 0 +// err := client.GetDomainNamesPages(params, +// func(page *GetDomainNamesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetDomainNamesPages(input *GetDomainNamesInput, fn func(p *GetDomainNamesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetDomainNamesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1173,7 +2077,28 @@ func (c *APIGateway) GetDomainNamesPages(input *GetDomainNamesInput, fn func(p * const opGetExport = "GetExport" -// GetExportRequest generates a request for the GetExport operation. +// GetExportRequest generates a "aws/request.Request" representing the +// client's request for the GetExport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetExport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetExportRequest method. +// req, resp := client.GetExportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetExportRequest(input *GetExportInput) (req *request.Request, output *GetExportOutput) { op := &request.Operation{ Name: opGetExport, @@ -1200,7 +2125,28 @@ func (c *APIGateway) GetExport(input *GetExportInput) (*GetExportOutput, error) const opGetIntegration = "GetIntegration" -// GetIntegrationRequest generates a request for the GetIntegration operation. +// GetIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the GetIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIntegrationRequest method. +// req, resp := client.GetIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetIntegrationRequest(input *GetIntegrationInput) (req *request.Request, output *Integration) { op := &request.Operation{ Name: opGetIntegration, @@ -1227,7 +2173,28 @@ func (c *APIGateway) GetIntegration(input *GetIntegrationInput) (*Integration, e const opGetIntegrationResponse = "GetIntegrationResponse" -// GetIntegrationResponseRequest generates a request for the GetIntegrationResponse operation. +// GetIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the GetIntegrationResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIntegrationResponseRequest method. 
+// req, resp := client.GetIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetIntegrationResponseRequest(input *GetIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { op := &request.Operation{ Name: opGetIntegrationResponse, @@ -1254,7 +2221,28 @@ func (c *APIGateway) GetIntegrationResponse(input *GetIntegrationResponseInput) const opGetMethod = "GetMethod" -// GetMethodRequest generates a request for the GetMethod operation. +// GetMethodRequest generates a "aws/request.Request" representing the +// client's request for the GetMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMethodRequest method. +// req, resp := client.GetMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetMethodRequest(input *GetMethodInput) (req *request.Request, output *Method) { op := &request.Operation{ Name: opGetMethod, @@ -1281,7 +2269,28 @@ func (c *APIGateway) GetMethod(input *GetMethodInput) (*Method, error) { const opGetMethodResponse = "GetMethodResponse" -// GetMethodResponseRequest generates a request for the GetMethodResponse operation. +// GetMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the GetMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMethodResponseRequest method. +// req, resp := client.GetMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetMethodResponseRequest(input *GetMethodResponseInput) (req *request.Request, output *MethodResponse) { op := &request.Operation{ Name: opGetMethodResponse, @@ -1308,7 +2317,28 @@ func (c *APIGateway) GetMethodResponse(input *GetMethodResponseInput) (*MethodRe const opGetModel = "GetModel" -// GetModelRequest generates a request for the GetModel operation. +// GetModelRequest generates a "aws/request.Request" representing the +// client's request for the GetModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelRequest method. +// req, resp := client.GetModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetModelRequest(input *GetModelInput) (req *request.Request, output *Model) { op := &request.Operation{ Name: opGetModel, @@ -1335,7 +2365,28 @@ func (c *APIGateway) GetModel(input *GetModelInput) (*Model, error) { const opGetModelTemplate = "GetModelTemplate" -// GetModelTemplateRequest generates a request for the GetModelTemplate operation. +// GetModelTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetModelTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModelTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelTemplateRequest method. +// req, resp := client.GetModelTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetModelTemplateRequest(input *GetModelTemplateInput) (req *request.Request, output *GetModelTemplateOutput) { op := &request.Operation{ Name: opGetModelTemplate, @@ -1363,7 +2414,28 @@ func (c *APIGateway) GetModelTemplate(input *GetModelTemplateInput) (*GetModelTe const opGetModels = "GetModels" -// GetModelsRequest generates a request for the GetModels operation. +// GetModelsRequest generates a "aws/request.Request" representing the +// client's request for the GetModels operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetModels method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetModelsRequest method. 
+// req, resp := client.GetModelsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetModelsRequest(input *GetModelsInput) (req *request.Request, output *GetModelsOutput) { op := &request.Operation{ Name: opGetModels, @@ -1394,6 +2466,23 @@ func (c *APIGateway) GetModels(input *GetModelsInput) (*GetModelsOutput, error) return out, err } +// GetModelsPages iterates over the pages of a GetModels operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetModels method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetModels operation. +// pageNum := 0 +// err := client.GetModelsPages(params, +// func(page *GetModelsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetModelsPages(input *GetModelsInput, fn func(p *GetModelsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetModelsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1404,7 +2493,28 @@ func (c *APIGateway) GetModelsPages(input *GetModelsInput, fn func(p *GetModelsO const opGetResource = "GetResource" -// GetResourceRequest generates a request for the GetResource operation. +// GetResourceRequest generates a "aws/request.Request" representing the +// client's request for the GetResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetResourceRequest method. +// req, resp := client.GetResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetResourceRequest(input *GetResourceInput) (req *request.Request, output *Resource) { op := &request.Operation{ Name: opGetResource, @@ -1431,7 +2541,28 @@ func (c *APIGateway) GetResource(input *GetResourceInput) (*Resource, error) { const opGetResources = "GetResources" -// GetResourcesRequest generates a request for the GetResources operation. +// GetResourcesRequest generates a "aws/request.Request" representing the +// client's request for the GetResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
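The generated comments above describe the same request-object pattern for every operation: build the request, optionally attach handlers to its lifecycle, then call Send. A minimal sketch of that flow, assuming an already-configured *apigateway.APIGateway client and using GetResourcesRequest as the example; this is illustrative only and not part of the vendored diff.

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// listResources shows the request-object pattern: the request is only
// executed when Send is called, and resp is populated afterwards.
func listResources(svc *apigateway.APIGateway, params *apigateway.GetResourcesInput) error {
	req, resp := svc.GetResourcesRequest(params)

	// Inject custom logic into the request lifecycle before sending.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("[DEBUG] sending %s request", r.Operation.Name)
	})

	if err := req.Send(); err != nil {
		return err
	}
	fmt.Println(resp) // resp is now filled
	return nil
}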
+// +// // Example sending a request using the GetResourcesRequest method. +// req, resp := client.GetResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetResourcesRequest(input *GetResourcesInput) (req *request.Request, output *GetResourcesOutput) { op := &request.Operation{ Name: opGetResources, @@ -1462,6 +2593,23 @@ func (c *APIGateway) GetResources(input *GetResourcesInput) (*GetResourcesOutput return out, err } +// GetResourcesPages iterates over the pages of a GetResources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetResources method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetResources operation. +// pageNum := 0 +// err := client.GetResourcesPages(params, +// func(page *GetResourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetResourcesPages(input *GetResourcesInput, fn func(p *GetResourcesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetResourcesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1472,7 +2620,28 @@ func (c *APIGateway) GetResourcesPages(input *GetResourcesInput, fn func(p *GetR const opGetRestApi = "GetRestApi" -// GetRestApiRequest generates a request for the GetRestApi operation. +// GetRestApiRequest generates a "aws/request.Request" representing the +// client's request for the GetRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRestApiRequest method. +// req, resp := client.GetRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetRestApiRequest(input *GetRestApiInput) (req *request.Request, output *RestApi) { op := &request.Operation{ Name: opGetRestApi, @@ -1499,7 +2668,28 @@ func (c *APIGateway) GetRestApi(input *GetRestApiInput) (*RestApi, error) { const opGetRestApis = "GetRestApis" -// GetRestApisRequest generates a request for the GetRestApis operation. +// GetRestApisRequest generates a "aws/request.Request" representing the +// client's request for the GetRestApis operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRestApis method directly +// instead. 
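The *Pages helpers documented above (GetModelsPages, GetResourcesPages, and so on) drive the paginator for you and may issue several requests to the service. A short sketch, assuming an initialized client, that gathers every resource ID across all pages:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// allResourceIDs pages through GetResources; the SDK stops iterating when the
// callback returns false or the last page has been delivered.
func allResourceIDs(svc *apigateway.APIGateway, restAPIID string) ([]string, error) {
	var ids []string
	params := &apigateway.GetResourcesInput{RestApiId: aws.String(restAPIID)}
	err := svc.GetResourcesPages(params, func(page *apigateway.GetResourcesOutput, lastPage bool) bool {
		for _, item := range page.Items {
			ids = append(ids, aws.StringValue(item.Id))
		}
		return true // keep iterating until the last page
	})
	return ids, err
}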
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRestApisRequest method. +// req, resp := client.GetRestApisRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetRestApisRequest(input *GetRestApisInput) (req *request.Request, output *GetRestApisOutput) { op := &request.Operation{ Name: opGetRestApis, @@ -1530,6 +2720,23 @@ func (c *APIGateway) GetRestApis(input *GetRestApisInput) (*GetRestApisOutput, e return out, err } +// GetRestApisPages iterates over the pages of a GetRestApis operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetRestApis method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetRestApis operation. +// pageNum := 0 +// err := client.GetRestApisPages(params, +// func(page *GetRestApisOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *APIGateway) GetRestApisPages(input *GetRestApisInput, fn func(p *GetRestApisOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetRestApisRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1540,7 +2747,28 @@ func (c *APIGateway) GetRestApisPages(input *GetRestApisInput, fn func(p *GetRes const opGetSdk = "GetSdk" -// GetSdkRequest generates a request for the GetSdk operation. +// GetSdkRequest generates a "aws/request.Request" representing the +// client's request for the GetSdk operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSdk method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSdkRequest method. +// req, resp := client.GetSdkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetSdkRequest(input *GetSdkInput) (req *request.Request, output *GetSdkOutput) { op := &request.Operation{ Name: opGetSdk, @@ -1567,7 +2795,28 @@ func (c *APIGateway) GetSdk(input *GetSdkInput) (*GetSdkOutput, error) { const opGetStage = "GetStage" -// GetStageRequest generates a request for the GetStage operation. +// GetStageRequest generates a "aws/request.Request" representing the +// client's request for the GetStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStage method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStageRequest method. +// req, resp := client.GetStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetStageRequest(input *GetStageInput) (req *request.Request, output *Stage) { op := &request.Operation{ Name: opGetStage, @@ -1594,7 +2843,28 @@ func (c *APIGateway) GetStage(input *GetStageInput) (*Stage, error) { const opGetStages = "GetStages" -// GetStagesRequest generates a request for the GetStages operation. +// GetStagesRequest generates a "aws/request.Request" representing the +// client's request for the GetStages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStagesRequest method. +// req, resp := client.GetStagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) GetStagesRequest(input *GetStagesInput) (req *request.Request, output *GetStagesOutput) { op := &request.Operation{ Name: opGetStages, @@ -1621,7 +2891,28 @@ func (c *APIGateway) GetStages(input *GetStagesInput) (*GetStagesOutput, error) const opImportRestApi = "ImportRestApi" -// ImportRestApiRequest generates a request for the ImportRestApi operation. +// ImportRestApiRequest generates a "aws/request.Request" representing the +// client's request for the ImportRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportRestApiRequest method. +// req, resp := client.ImportRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) ImportRestApiRequest(input *ImportRestApiInput) (req *request.Request, output *RestApi) { op := &request.Operation{ Name: opImportRestApi, @@ -1649,7 +2940,28 @@ func (c *APIGateway) ImportRestApi(input *ImportRestApiInput) (*RestApi, error) const opPutIntegration = "PutIntegration" -// PutIntegrationRequest generates a request for the PutIntegration operation. +// PutIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the PutIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutIntegrationRequest method. +// req, resp := client.PutIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) PutIntegrationRequest(input *PutIntegrationInput) (req *request.Request, output *Integration) { op := &request.Operation{ Name: opPutIntegration, @@ -1676,7 +2988,28 @@ func (c *APIGateway) PutIntegration(input *PutIntegrationInput) (*Integration, e const opPutIntegrationResponse = "PutIntegrationResponse" -// PutIntegrationResponseRequest generates a request for the PutIntegrationResponse operation. +// PutIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the PutIntegrationResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutIntegrationResponseRequest method. +// req, resp := client.PutIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) PutIntegrationResponseRequest(input *PutIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { op := &request.Operation{ Name: opPutIntegrationResponse, @@ -1703,7 +3036,28 @@ func (c *APIGateway) PutIntegrationResponse(input *PutIntegrationResponseInput) const opPutMethod = "PutMethod" -// PutMethodRequest generates a request for the PutMethod operation. +// PutMethodRequest generates a "aws/request.Request" representing the +// client's request for the PutMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMethodRequest method. 
+// req, resp := client.PutMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) PutMethodRequest(input *PutMethodInput) (req *request.Request, output *Method) { op := &request.Operation{ Name: opPutMethod, @@ -1730,7 +3084,28 @@ func (c *APIGateway) PutMethod(input *PutMethodInput) (*Method, error) { const opPutMethodResponse = "PutMethodResponse" -// PutMethodResponseRequest generates a request for the PutMethodResponse operation. +// PutMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the PutMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMethodResponseRequest method. +// req, resp := client.PutMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) PutMethodResponseRequest(input *PutMethodResponseInput) (req *request.Request, output *MethodResponse) { op := &request.Operation{ Name: opPutMethodResponse, @@ -1757,7 +3132,28 @@ func (c *APIGateway) PutMethodResponse(input *PutMethodResponseInput) (*MethodRe const opPutRestApi = "PutRestApi" -// PutRestApiRequest generates a request for the PutRestApi operation. +// PutRestApiRequest generates a "aws/request.Request" representing the +// client's request for the PutRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRestApiRequest method. +// req, resp := client.PutRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) PutRestApiRequest(input *PutRestApiInput) (req *request.Request, output *RestApi) { op := &request.Operation{ Name: opPutRestApi, @@ -1787,7 +3183,28 @@ func (c *APIGateway) PutRestApi(input *PutRestApiInput) (*RestApi, error) { const opTestInvokeAuthorizer = "TestInvokeAuthorizer" -// TestInvokeAuthorizerRequest generates a request for the TestInvokeAuthorizer operation. +// TestInvokeAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the TestInvokeAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestInvokeAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestInvokeAuthorizerRequest method. +// req, resp := client.TestInvokeAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) TestInvokeAuthorizerRequest(input *TestInvokeAuthorizerInput) (req *request.Request, output *TestInvokeAuthorizerOutput) { op := &request.Operation{ Name: opTestInvokeAuthorizer, @@ -1815,7 +3232,28 @@ func (c *APIGateway) TestInvokeAuthorizer(input *TestInvokeAuthorizerInput) (*Te const opTestInvokeMethod = "TestInvokeMethod" -// TestInvokeMethodRequest generates a request for the TestInvokeMethod operation. +// TestInvokeMethodRequest generates a "aws/request.Request" representing the +// client's request for the TestInvokeMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestInvokeMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestInvokeMethodRequest method. +// req, resp := client.TestInvokeMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) TestInvokeMethodRequest(input *TestInvokeMethodInput) (req *request.Request, output *TestInvokeMethodOutput) { op := &request.Operation{ Name: opTestInvokeMethod, @@ -1843,7 +3281,28 @@ func (c *APIGateway) TestInvokeMethod(input *TestInvokeMethodInput) (*TestInvoke const opUpdateAccount = "UpdateAccount" -// UpdateAccountRequest generates a request for the UpdateAccount operation. +// UpdateAccountRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAccount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAccountRequest method. 
+// req, resp := client.UpdateAccountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateAccountRequest(input *UpdateAccountInput) (req *request.Request, output *Account) { op := &request.Operation{ Name: opUpdateAccount, @@ -1870,7 +3329,28 @@ func (c *APIGateway) UpdateAccount(input *UpdateAccountInput) (*Account, error) const opUpdateApiKey = "UpdateApiKey" -// UpdateApiKeyRequest generates a request for the UpdateApiKey operation. +// UpdateApiKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApiKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApiKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApiKeyRequest method. +// req, resp := client.UpdateApiKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateApiKeyRequest(input *UpdateApiKeyInput) (req *request.Request, output *ApiKey) { op := &request.Operation{ Name: opUpdateApiKey, @@ -1897,7 +3377,28 @@ func (c *APIGateway) UpdateApiKey(input *UpdateApiKeyInput) (*ApiKey, error) { const opUpdateAuthorizer = "UpdateAuthorizer" -// UpdateAuthorizerRequest generates a request for the UpdateAuthorizer operation. +// UpdateAuthorizerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAuthorizer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAuthorizer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAuthorizerRequest method. +// req, resp := client.UpdateAuthorizerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateAuthorizerRequest(input *UpdateAuthorizerInput) (req *request.Request, output *Authorizer) { op := &request.Operation{ Name: opUpdateAuthorizer, @@ -1924,7 +3425,28 @@ func (c *APIGateway) UpdateAuthorizer(input *UpdateAuthorizerInput) (*Authorizer const opUpdateBasePathMapping = "UpdateBasePathMapping" -// UpdateBasePathMappingRequest generates a request for the UpdateBasePathMapping operation. +// UpdateBasePathMappingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateBasePathMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateBasePathMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateBasePathMappingRequest method. +// req, resp := client.UpdateBasePathMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateBasePathMappingRequest(input *UpdateBasePathMappingInput) (req *request.Request, output *BasePathMapping) { op := &request.Operation{ Name: opUpdateBasePathMapping, @@ -1951,7 +3473,28 @@ func (c *APIGateway) UpdateBasePathMapping(input *UpdateBasePathMappingInput) (* const opUpdateClientCertificate = "UpdateClientCertificate" -// UpdateClientCertificateRequest generates a request for the UpdateClientCertificate operation. +// UpdateClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateClientCertificateRequest method. +// req, resp := client.UpdateClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateClientCertificateRequest(input *UpdateClientCertificateInput) (req *request.Request, output *ClientCertificate) { op := &request.Operation{ Name: opUpdateClientCertificate, @@ -1978,7 +3521,28 @@ func (c *APIGateway) UpdateClientCertificate(input *UpdateClientCertificateInput const opUpdateDeployment = "UpdateDeployment" -// UpdateDeploymentRequest generates a request for the UpdateDeployment operation. +// UpdateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDeploymentRequest method. 
+// req, resp := client.UpdateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateDeploymentRequest(input *UpdateDeploymentInput) (req *request.Request, output *Deployment) { op := &request.Operation{ Name: opUpdateDeployment, @@ -2005,7 +3569,28 @@ func (c *APIGateway) UpdateDeployment(input *UpdateDeploymentInput) (*Deployment const opUpdateDomainName = "UpdateDomainName" -// UpdateDomainNameRequest generates a request for the UpdateDomainName operation. +// UpdateDomainNameRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDomainName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDomainName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDomainNameRequest method. +// req, resp := client.UpdateDomainNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateDomainNameRequest(input *UpdateDomainNameInput) (req *request.Request, output *DomainName) { op := &request.Operation{ Name: opUpdateDomainName, @@ -2032,7 +3617,28 @@ func (c *APIGateway) UpdateDomainName(input *UpdateDomainNameInput) (*DomainName const opUpdateIntegration = "UpdateIntegration" -// UpdateIntegrationRequest generates a request for the UpdateIntegration operation. +// UpdateIntegrationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIntegration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIntegration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIntegrationRequest method. +// req, resp := client.UpdateIntegrationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateIntegrationRequest(input *UpdateIntegrationInput) (req *request.Request, output *Integration) { op := &request.Operation{ Name: opUpdateIntegration, @@ -2059,7 +3665,28 @@ func (c *APIGateway) UpdateIntegration(input *UpdateIntegrationInput) (*Integrat const opUpdateIntegrationResponse = "UpdateIntegrationResponse" -// UpdateIntegrationResponseRequest generates a request for the UpdateIntegrationResponse operation. +// UpdateIntegrationResponseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateIntegrationResponse operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateIntegrationResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateIntegrationResponseRequest method. +// req, resp := client.UpdateIntegrationResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateIntegrationResponseRequest(input *UpdateIntegrationResponseInput) (req *request.Request, output *IntegrationResponse) { op := &request.Operation{ Name: opUpdateIntegrationResponse, @@ -2086,7 +3713,28 @@ func (c *APIGateway) UpdateIntegrationResponse(input *UpdateIntegrationResponseI const opUpdateMethod = "UpdateMethod" -// UpdateMethodRequest generates a request for the UpdateMethod operation. +// UpdateMethodRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMethod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMethod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMethodRequest method. +// req, resp := client.UpdateMethodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateMethodRequest(input *UpdateMethodInput) (req *request.Request, output *Method) { op := &request.Operation{ Name: opUpdateMethod, @@ -2113,7 +3761,28 @@ func (c *APIGateway) UpdateMethod(input *UpdateMethodInput) (*Method, error) { const opUpdateMethodResponse = "UpdateMethodResponse" -// UpdateMethodResponseRequest generates a request for the UpdateMethodResponse operation. +// UpdateMethodResponseRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMethodResponse operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMethodResponse method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMethodResponseRequest method. 
+// req, resp := client.UpdateMethodResponseRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateMethodResponseRequest(input *UpdateMethodResponseInput) (req *request.Request, output *MethodResponse) { op := &request.Operation{ Name: opUpdateMethodResponse, @@ -2140,7 +3809,28 @@ func (c *APIGateway) UpdateMethodResponse(input *UpdateMethodResponseInput) (*Me const opUpdateModel = "UpdateModel" -// UpdateModelRequest generates a request for the UpdateModel operation. +// UpdateModelRequest generates a "aws/request.Request" representing the +// client's request for the UpdateModel operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateModel method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateModelRequest method. +// req, resp := client.UpdateModelRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateModelRequest(input *UpdateModelInput) (req *request.Request, output *Model) { op := &request.Operation{ Name: opUpdateModel, @@ -2167,7 +3857,28 @@ func (c *APIGateway) UpdateModel(input *UpdateModelInput) (*Model, error) { const opUpdateResource = "UpdateResource" -// UpdateResourceRequest generates a request for the UpdateResource operation. +// UpdateResourceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateResourceRequest method. +// req, resp := client.UpdateResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateResourceRequest(input *UpdateResourceInput) (req *request.Request, output *Resource) { op := &request.Operation{ Name: opUpdateResource, @@ -2194,7 +3905,28 @@ func (c *APIGateway) UpdateResource(input *UpdateResourceInput) (*Resource, erro const opUpdateRestApi = "UpdateRestApi" -// UpdateRestApiRequest generates a request for the UpdateRestApi operation. +// UpdateRestApiRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRestApi operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRestApi method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRestApiRequest method. +// req, resp := client.UpdateRestApiRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateRestApiRequest(input *UpdateRestApiInput) (req *request.Request, output *RestApi) { op := &request.Operation{ Name: opUpdateRestApi, @@ -2221,7 +3953,28 @@ func (c *APIGateway) UpdateRestApi(input *UpdateRestApiInput) (*RestApi, error) const opUpdateStage = "UpdateStage" -// UpdateStageRequest generates a request for the UpdateStage operation. +// UpdateStageRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStageRequest method. +// req, resp := client.UpdateStageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *APIGateway) UpdateStageRequest(input *UpdateStageInput) (req *request.Request, output *Stage) { op := &request.Operation{ Name: opUpdateStage, @@ -5138,6 +6891,22 @@ type Integration struct { // Specifies the integration's responses. IntegrationResponses map[string]*IntegrationResponse `locationName:"integrationResponses" type:"map"` + // Specifies the pass-through behavior for incoming requests based on the Content-Type + // header in the request, and the available requestTemplates defined on the + // Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, + // and NEVER. + // + // WHEN_NO_MATCH passes the request body for unmapped content types through + // to the Integration backend without transformation. + // + // NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media + // Type' response. + // + // WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content + // types mapped to templates. However if there is at least one content type + // defined, unmapped content types will be rejected with the same 415 response. + PassthroughBehavior *string `locationName:"passthroughBehavior" type:"string"` + // Represents requests parameters that are sent with the backend request. Request // parameters are represented as a key/value map, with a destination as the // key and a source as the value. A source must match an existing method request @@ -5148,7 +6917,10 @@ type Integration struct { // unique parameter name. 
RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` - // Specifies the integration's request templates. + // Represents a map of Velocity templates that are applied on the request payload + // based on the value of the Content-Type header sent by the client. The content + // type value is the key in this map, and the template (as a String) is the + // value. RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` // Specifies the integration's type. The valid value is HTTP, AWS, or MOCK. @@ -5477,6 +7249,22 @@ type PutIntegrationInput struct { // or AWS, this field is required. IntegrationHttpMethod *string `locationName:"httpMethod" type:"string"` + // Specifies the pass-through behavior for incoming requests based on the Content-Type + // header in the request, and the available requestTemplates defined on the + // Integration. There are three valid values: WHEN_NO_MATCH, WHEN_NO_TEMPLATES, + // and NEVER. + // + // WHEN_NO_MATCH passes the request body for unmapped content types through + // to the Integration backend without transformation. + // + // NEVER rejects unmapped content types with an HTTP 415 'Unsupported Media + // Type' response. + // + // WHEN_NO_TEMPLATES will allow pass-through when the Integration has NO content + // types mapped to templates. However if there is at least one content type + // defined, unmapped content types will be rejected with the same 415 response. + PassthroughBehavior *string `locationName:"passthroughBehavior" type:"string"` + // Represents request parameters that are sent with the backend request. Request // parameters are represented as a key/value map, with a destination as the // key and a source as the value. A source must match an existing method request @@ -5487,9 +7275,10 @@ type PutIntegrationInput struct { // unique parameter name. RequestParameters map[string]*string `locationName:"requestParameters" type:"map"` - // Specifies the templates used to transform the method request body. Request - // templates are represented as a key/value map, with a content-type as the - // key and a template as the value. + // Represents a map of Velocity templates that are applied on the request payload + // based on the value of the Content-Type header sent by the client. The content + // type value is the key in this map, and the template (as a String) is the + // value. RequestTemplates map[string]*string `locationName:"requestTemplates" type:"map"` // Specifies a put integration request's resource ID. 
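The PassthroughBehavior field added above, together with the reworded RequestTemplates documentation, governs what happens to request bodies whose Content-Type has no mapped template. A hedged sketch of a PutIntegration call exercising both fields; the identifiers and the invocation URI are caller-supplied placeholders, and fields such as RestApiId, ResourceId, and Uri are assumed from the wider API rather than shown in this hunk.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

// putIntegration maps application/json through a Velocity template and lets
// WHEN_NO_TEMPLATES decide how unmapped content types are handled.
func putIntegration(svc *apigateway.APIGateway, restAPIID, resourceID, lambdaURI string) error {
	_, err := svc.PutIntegration(&apigateway.PutIntegrationInput{
		RestApiId:             aws.String(restAPIID),  // placeholder, supplied by caller
		ResourceId:            aws.String(resourceID), // placeholder, supplied by caller
		HttpMethod:            aws.String("POST"),
		Type:                  aws.String("AWS"),
		IntegrationHttpMethod: aws.String("POST"),
		Uri:                   aws.String(lambdaURI), // placeholder invocation URI
		// Content-Type is the key; the Velocity template is the value.
		RequestTemplates: map[string]*string{
			"application/json": aws.String(`{"body": $input.json('$')}`),
		},
		// Pass unmapped content types through only when no templates are
		// defined at all; otherwise they are rejected with an HTTP 415.
		PassthroughBehavior: aws.String("WHEN_NO_TEMPLATES"),
	})
	return err
}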
diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go index 6e6c8cfb3..3372a2f74 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon API Gateway helps developers deliver robust, secure and scalable mobile @@ -62,7 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index 87ee5ab6e..04caf266b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -15,7 +15,28 @@ import ( const opAttachInstances = "AttachInstances" -// AttachInstancesRequest generates a request for the AttachInstances operation. +// AttachInstancesRequest generates a "aws/request.Request" representing the +// client's request for the AttachInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachInstancesRequest method. +// req, resp := client.AttachInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req *request.Request, output *AttachInstancesOutput) { op := &request.Operation{ Name: opAttachInstances, @@ -53,7 +74,28 @@ func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInsta const opAttachLoadBalancers = "AttachLoadBalancers" -// AttachLoadBalancersRequest generates a request for the AttachLoadBalancers operation. +// AttachLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the AttachLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachLoadBalancers method directly +// instead. 
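The service.go hunk at the top of this file swaps the private v4.Sign function for the named v4.SignRequestHandler from the public aws/signer/v4 package. Because the handler is now registered by name, calling code can locate and replace it; a speculative sketch of that, purely illustrative and not something this patch does:

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/apigateway"
)

func customSignedClient() *apigateway.APIGateway {
	svc := apigateway.New(session.New())

	// Remove the default SigV4 handler, matched by its name ...
	svc.Handlers.Sign.Remove(v4.SignRequestHandler)

	// ... and push a custom named handler in its place. "example.Sign" is a
	// hypothetical name; real code would perform actual request signing here.
	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
		Name: "example.Sign",
		Fn: func(r *request.Request) {
			r.HTTPRequest.Header.Set("X-Example-Signed", "true")
		},
	})
	return svc
}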
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachLoadBalancersRequest method. +// req, resp := client.AttachLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput) (req *request.Request, output *AttachLoadBalancersOutput) { op := &request.Operation{ Name: opAttachLoadBalancers, @@ -87,7 +129,28 @@ func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*Att const opCompleteLifecycleAction = "CompleteLifecycleAction" -// CompleteLifecycleActionRequest generates a request for the CompleteLifecycleAction operation. +// CompleteLifecycleActionRequest generates a "aws/request.Request" representing the +// client's request for the CompleteLifecycleAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteLifecycleAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteLifecycleActionRequest method. +// req, resp := client.CompleteLifecycleActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleActionInput) (req *request.Request, output *CompleteLifecycleActionOutput) { op := &request.Operation{ Name: opCompleteLifecycleAction, @@ -130,7 +193,28 @@ func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInpu const opCreateAutoScalingGroup = "CreateAutoScalingGroup" -// CreateAutoScalingGroupRequest generates a request for the CreateAutoScalingGroup operation. +// CreateAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAutoScalingGroupRequest method. 
+// req, resp := client.CreateAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGroupInput) (req *request.Request, output *CreateAutoScalingGroupOutput) { op := &request.Operation{ Name: opCreateAutoScalingGroup, @@ -166,7 +250,28 @@ func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) const opCreateLaunchConfiguration = "CreateLaunchConfiguration" -// CreateLaunchConfigurationRequest generates a request for the CreateLaunchConfiguration operation. +// CreateLaunchConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateLaunchConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLaunchConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLaunchConfigurationRequest method. +// req, resp := client.CreateLaunchConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfigurationInput) (req *request.Request, output *CreateLaunchConfigurationOutput) { op := &request.Operation{ Name: opCreateLaunchConfiguration, @@ -202,7 +307,28 @@ func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfiguration const opCreateOrUpdateTags = "CreateOrUpdateTags" -// CreateOrUpdateTagsRequest generates a request for the CreateOrUpdateTags operation. +// CreateOrUpdateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateOrUpdateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOrUpdateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOrUpdateTagsRequest method. +// req, resp := client.CreateOrUpdateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) (req *request.Request, output *CreateOrUpdateTagsOutput) { op := &request.Operation{ Name: opCreateOrUpdateTags, @@ -237,7 +363,28 @@ func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*Creat const opDeleteAutoScalingGroup = "DeleteAutoScalingGroup" -// DeleteAutoScalingGroupRequest generates a request for the DeleteAutoScalingGroup operation. 
+// DeleteAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAutoScalingGroupRequest method. +// req, resp := client.DeleteAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteAutoScalingGroupRequest(input *DeleteAutoScalingGroupInput) (req *request.Request, output *DeleteAutoScalingGroupOutput) { op := &request.Operation{ Name: opDeleteAutoScalingGroup, @@ -281,7 +428,28 @@ func (c *AutoScaling) DeleteAutoScalingGroup(input *DeleteAutoScalingGroupInput) const opDeleteLaunchConfiguration = "DeleteLaunchConfiguration" -// DeleteLaunchConfigurationRequest generates a request for the DeleteLaunchConfiguration operation. +// DeleteLaunchConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLaunchConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLaunchConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLaunchConfigurationRequest method. +// req, resp := client.DeleteLaunchConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteLaunchConfigurationRequest(input *DeleteLaunchConfigurationInput) (req *request.Request, output *DeleteLaunchConfigurationOutput) { op := &request.Operation{ Name: opDeleteLaunchConfiguration, @@ -314,7 +482,28 @@ func (c *AutoScaling) DeleteLaunchConfiguration(input *DeleteLaunchConfiguration const opDeleteLifecycleHook = "DeleteLifecycleHook" -// DeleteLifecycleHookRequest generates a request for the DeleteLifecycleHook operation. +// DeleteLifecycleHookRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLifecycleHook operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLifecycleHook method directly +// instead. 
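[Editor's note] The generated comments above all describe the same two-step pattern: the *Request method only builds a *request.Request and an output placeholder, and nothing is sent until "Send" is called. A minimal, self-contained sketch of that flow follows; it is an editorial illustration, not part of the patch, and it assumes the aws-sdk-go v1 import paths vendored by this repository. DescribeAccountLimits is used only because it needs no parameters.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        // Build a client; credentials come from the environment/shared config.
        svc := autoscaling.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

        // Step 1: create the request object. Nothing has gone over the wire yet.
        req, resp := svc.DescribeAccountLimitsRequest(&autoscaling.DescribeAccountLimitsInput{})

        // Step 2: Send executes the request and fills in resp on success.
        if err := req.Send(); err == nil {
            fmt.Println(resp)
        }
    }

Until Send returns without error the output struct is empty, which is why the generated examples only print resp inside the err == nil branch.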
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLifecycleHookRequest method. +// req, resp := client.DeleteLifecycleHookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteLifecycleHookRequest(input *DeleteLifecycleHookInput) (req *request.Request, output *DeleteLifecycleHookOutput) { op := &request.Operation{ Name: opDeleteLifecycleHook, @@ -344,7 +533,28 @@ func (c *AutoScaling) DeleteLifecycleHook(input *DeleteLifecycleHookInput) (*Del const opDeleteNotificationConfiguration = "DeleteNotificationConfiguration" -// DeleteNotificationConfigurationRequest generates a request for the DeleteNotificationConfiguration operation. +// DeleteNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNotificationConfigurationRequest method. +// req, resp := client.DeleteNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteNotificationConfigurationRequest(input *DeleteNotificationConfigurationInput) (req *request.Request, output *DeleteNotificationConfigurationOutput) { op := &request.Operation{ Name: opDeleteNotificationConfiguration, @@ -373,7 +583,28 @@ func (c *AutoScaling) DeleteNotificationConfiguration(input *DeleteNotificationC const opDeletePolicy = "DeletePolicy" -// DeletePolicyRequest generates a request for the DeletePolicy operation. +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyRequest method. 
+// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { op := &request.Operation{ Name: opDeletePolicy, @@ -405,7 +636,28 @@ func (c *AutoScaling) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutpu const opDeleteScheduledAction = "DeleteScheduledAction" -// DeleteScheduledActionRequest generates a request for the DeleteScheduledAction operation. +// DeleteScheduledActionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteScheduledAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteScheduledAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteScheduledActionRequest method. +// req, resp := client.DeleteScheduledActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionInput) (req *request.Request, output *DeleteScheduledActionOutput) { op := &request.Operation{ Name: opDeleteScheduledAction, @@ -434,7 +686,28 @@ func (c *AutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) ( const opDeleteTags = "DeleteTags" -// DeleteTagsRequest generates a request for the DeleteTags operation. +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { op := &request.Operation{ Name: opDeleteTags, @@ -463,7 +736,28 @@ func (c *AutoScaling) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, err const opDescribeAccountLimits = "DescribeAccountLimits" -// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountLimitsRequest method. +// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { op := &request.Operation{ Name: opDescribeAccountLimits, @@ -494,7 +788,28 @@ func (c *AutoScaling) DescribeAccountLimits(input *DescribeAccountLimitsInput) ( const opDescribeAdjustmentTypes = "DescribeAdjustmentTypes" -// DescribeAdjustmentTypesRequest generates a request for the DescribeAdjustmentTypes operation. +// DescribeAdjustmentTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAdjustmentTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAdjustmentTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAdjustmentTypesRequest method. +// req, resp := client.DescribeAdjustmentTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeAdjustmentTypesRequest(input *DescribeAdjustmentTypesInput) (req *request.Request, output *DescribeAdjustmentTypesOutput) { op := &request.Operation{ Name: opDescribeAdjustmentTypes, @@ -521,7 +836,28 @@ func (c *AutoScaling) DescribeAdjustmentTypes(input *DescribeAdjustmentTypesInpu const opDescribeAutoScalingGroups = "DescribeAutoScalingGroups" -// DescribeAutoScalingGroupsRequest generates a request for the DescribeAutoScalingGroups operation. +// DescribeAutoScalingGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeAutoScalingGroupsRequest method. +// req, resp := client.DescribeAutoScalingGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalingGroupsInput) (req *request.Request, output *DescribeAutoScalingGroupsOutput) { op := &request.Operation{ Name: opDescribeAutoScalingGroups, @@ -553,6 +889,23 @@ func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroups return out, err } +// DescribeAutoScalingGroupsPages iterates over the pages of a DescribeAutoScalingGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAutoScalingGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutoScalingGroups operation. +// pageNum := 0 +// err := client.DescribeAutoScalingGroupsPages(params, +// func(page *DescribeAutoScalingGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingGroupsInput, fn func(p *DescribeAutoScalingGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeAutoScalingGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -563,7 +916,28 @@ func (c *AutoScaling) DescribeAutoScalingGroupsPages(input *DescribeAutoScalingG const opDescribeAutoScalingInstances = "DescribeAutoScalingInstances" -// DescribeAutoScalingInstancesRequest generates a request for the DescribeAutoScalingInstances operation. +// DescribeAutoScalingInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAutoScalingInstancesRequest method. +// req, resp := client.DescribeAutoScalingInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoScalingInstancesInput) (req *request.Request, output *DescribeAutoScalingInstancesOutput) { op := &request.Operation{ Name: opDescribeAutoScalingInstances, @@ -595,6 +969,23 @@ func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingIns return out, err } +// DescribeAutoScalingInstancesPages iterates over the pages of a DescribeAutoScalingInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
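[Editor's note] Every paginated operation in this file also gains a *Pages helper with the callback signature shown above. As a self-contained illustration (not part of the patch; same vendored aws-sdk-go v1 packages assumed), iterating all Auto Scaling groups might look like this:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        svc := autoscaling.New(session.New())

        // Walk every page of DescribeAutoScalingGroups; returning true from the
        // callback asks the paginator to fetch the next page.
        err := svc.DescribeAutoScalingGroupsPages(&autoscaling.DescribeAutoScalingGroupsInput{},
            func(page *autoscaling.DescribeAutoScalingGroupsOutput, lastPage bool) bool {
                for _, g := range page.AutoScalingGroups {
                    fmt.Println(aws.StringValue(g.AutoScalingGroupName))
                }
                return true
            })
        if err != nil {
            fmt.Println("paging failed:", err)
        }
    }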
+// +// See DescribeAutoScalingInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAutoScalingInstances operation. +// pageNum := 0 +// err := client.DescribeAutoScalingInstancesPages(params, +// func(page *DescribeAutoScalingInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScalingInstancesInput, fn func(p *DescribeAutoScalingInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeAutoScalingInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -605,7 +996,28 @@ func (c *AutoScaling) DescribeAutoScalingInstancesPages(input *DescribeAutoScali const opDescribeAutoScalingNotificationTypes = "DescribeAutoScalingNotificationTypes" -// DescribeAutoScalingNotificationTypesRequest generates a request for the DescribeAutoScalingNotificationTypes operation. +// DescribeAutoScalingNotificationTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAutoScalingNotificationTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAutoScalingNotificationTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAutoScalingNotificationTypesRequest method. +// req, resp := client.DescribeAutoScalingNotificationTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeAutoScalingNotificationTypesRequest(input *DescribeAutoScalingNotificationTypesInput) (req *request.Request, output *DescribeAutoScalingNotificationTypesOutput) { op := &request.Operation{ Name: opDescribeAutoScalingNotificationTypes, @@ -632,7 +1044,28 @@ func (c *AutoScaling) DescribeAutoScalingNotificationTypes(input *DescribeAutoSc const opDescribeLaunchConfigurations = "DescribeLaunchConfigurations" -// DescribeLaunchConfigurationsRequest generates a request for the DescribeLaunchConfigurations operation. +// DescribeLaunchConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLaunchConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLaunchConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeLaunchConfigurationsRequest method. +// req, resp := client.DescribeLaunchConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchConfigurationsInput) (req *request.Request, output *DescribeLaunchConfigurationsOutput) { op := &request.Operation{ Name: opDescribeLaunchConfigurations, @@ -664,6 +1097,23 @@ func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigur return out, err } +// DescribeLaunchConfigurationsPages iterates over the pages of a DescribeLaunchConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLaunchConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLaunchConfigurations operation. +// pageNum := 0 +// err := client.DescribeLaunchConfigurationsPages(params, +// func(page *DescribeLaunchConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchConfigurationsInput, fn func(p *DescribeLaunchConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLaunchConfigurationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -674,7 +1124,28 @@ func (c *AutoScaling) DescribeLaunchConfigurationsPages(input *DescribeLaunchCon const opDescribeLifecycleHookTypes = "DescribeLifecycleHookTypes" -// DescribeLifecycleHookTypesRequest generates a request for the DescribeLifecycleHookTypes operation. +// DescribeLifecycleHookTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLifecycleHookTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLifecycleHookTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLifecycleHookTypesRequest method. +// req, resp := client.DescribeLifecycleHookTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeLifecycleHookTypesRequest(input *DescribeLifecycleHookTypesInput) (req *request.Request, output *DescribeLifecycleHookTypesOutput) { op := &request.Operation{ Name: opDescribeLifecycleHookTypes, @@ -701,7 +1172,28 @@ func (c *AutoScaling) DescribeLifecycleHookTypes(input *DescribeLifecycleHookTyp const opDescribeLifecycleHooks = "DescribeLifecycleHooks" -// DescribeLifecycleHooksRequest generates a request for the DescribeLifecycleHooks operation. 
+// DescribeLifecycleHooksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLifecycleHooks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLifecycleHooks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLifecycleHooksRequest method. +// req, resp := client.DescribeLifecycleHooksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeLifecycleHooksRequest(input *DescribeLifecycleHooksInput) (req *request.Request, output *DescribeLifecycleHooksOutput) { op := &request.Operation{ Name: opDescribeLifecycleHooks, @@ -728,7 +1220,28 @@ func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) const opDescribeLoadBalancers = "DescribeLoadBalancers" -// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation. +// DescribeLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancersRequest method. +// req, resp := client.DescribeLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { op := &request.Operation{ Name: opDescribeLoadBalancers, @@ -755,7 +1268,28 @@ func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) ( const opDescribeMetricCollectionTypes = "DescribeMetricCollectionTypes" -// DescribeMetricCollectionTypesRequest generates a request for the DescribeMetricCollectionTypes operation. +// DescribeMetricCollectionTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMetricCollectionTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeMetricCollectionTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMetricCollectionTypesRequest method. +// req, resp := client.DescribeMetricCollectionTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeMetricCollectionTypesRequest(input *DescribeMetricCollectionTypesInput) (req *request.Request, output *DescribeMetricCollectionTypesOutput) { op := &request.Operation{ Name: opDescribeMetricCollectionTypes, @@ -785,7 +1319,28 @@ func (c *AutoScaling) DescribeMetricCollectionTypes(input *DescribeMetricCollect const opDescribeNotificationConfigurations = "DescribeNotificationConfigurations" -// DescribeNotificationConfigurationsRequest generates a request for the DescribeNotificationConfigurations operation. +// DescribeNotificationConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNotificationConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNotificationConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNotificationConfigurationsRequest method. +// req, resp := client.DescribeNotificationConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeNotificationConfigurationsRequest(input *DescribeNotificationConfigurationsInput) (req *request.Request, output *DescribeNotificationConfigurationsOutput) { op := &request.Operation{ Name: opDescribeNotificationConfigurations, @@ -817,6 +1372,23 @@ func (c *AutoScaling) DescribeNotificationConfigurations(input *DescribeNotifica return out, err } +// DescribeNotificationConfigurationsPages iterates over the pages of a DescribeNotificationConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeNotificationConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeNotificationConfigurations operation. 
+// pageNum := 0 +// err := client.DescribeNotificationConfigurationsPages(params, +// func(page *DescribeNotificationConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNotificationConfigurationsInput, fn func(p *DescribeNotificationConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeNotificationConfigurationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -827,7 +1399,28 @@ func (c *AutoScaling) DescribeNotificationConfigurationsPages(input *DescribeNot const opDescribePolicies = "DescribePolicies" -// DescribePoliciesRequest generates a request for the DescribePolicies operation. +// DescribePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePoliciesRequest method. +// req, resp := client.DescribePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribePoliciesRequest(input *DescribePoliciesInput) (req *request.Request, output *DescribePoliciesOutput) { op := &request.Operation{ Name: opDescribePolicies, @@ -858,6 +1451,23 @@ func (c *AutoScaling) DescribePolicies(input *DescribePoliciesInput) (*DescribeP return out, err } +// DescribePoliciesPages iterates over the pages of a DescribePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribePolicies operation. +// pageNum := 0 +// err := client.DescribePoliciesPages(params, +// func(page *DescribePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn func(p *DescribePoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribePoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -868,7 +1478,28 @@ func (c *AutoScaling) DescribePoliciesPages(input *DescribePoliciesInput, fn fun const opDescribeScalingActivities = "DescribeScalingActivities" -// DescribeScalingActivitiesRequest generates a request for the DescribeScalingActivities operation. +// DescribeScalingActivitiesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingActivities operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalingActivities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalingActivitiesRequest method. +// req, resp := client.DescribeScalingActivitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) { op := &request.Operation{ Name: opDescribeScalingActivities, @@ -902,6 +1533,23 @@ func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivities return out, err } +// DescribeScalingActivitiesPages iterates over the pages of a DescribeScalingActivities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScalingActivities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScalingActivities operation. +// pageNum := 0 +// err := client.DescribeScalingActivitiesPages(params, +// func(page *DescribeScalingActivitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(p *DescribeScalingActivitiesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeScalingActivitiesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -912,7 +1560,28 @@ func (c *AutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActiv const opDescribeScalingProcessTypes = "DescribeScalingProcessTypes" -// DescribeScalingProcessTypesRequest generates a request for the DescribeScalingProcessTypes operation. +// DescribeScalingProcessTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScalingProcessTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScalingProcessTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScalingProcessTypesRequest method. 
+// req, resp := client.DescribeScalingProcessTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeScalingProcessTypesRequest(input *DescribeScalingProcessTypesInput) (req *request.Request, output *DescribeScalingProcessTypesOutput) { op := &request.Operation{ Name: opDescribeScalingProcessTypes, @@ -939,7 +1608,28 @@ func (c *AutoScaling) DescribeScalingProcessTypes(input *DescribeScalingProcessT const opDescribeScheduledActions = "DescribeScheduledActions" -// DescribeScheduledActionsRequest generates a request for the DescribeScheduledActions operation. +// DescribeScheduledActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScheduledActionsRequest method. +// req, resp := client.DescribeScheduledActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledActionsInput) (req *request.Request, output *DescribeScheduledActionsOutput) { op := &request.Operation{ Name: opDescribeScheduledActions, @@ -971,6 +1661,23 @@ func (c *AutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsIn return out, err } +// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeScheduledActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. +// pageNum := 0 +// err := client.DescribeScheduledActionsPages(params, +// func(page *DescribeScheduledActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(p *DescribeScheduledActionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeScheduledActionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -981,7 +1688,28 @@ func (c *AutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActi const opDescribeTags = "DescribeTags" -// DescribeTagsRequest generates a request for the DescribeTags operation. +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { op := &request.Operation{ Name: opDescribeTags, @@ -1021,6 +1749,23 @@ func (c *AutoScaling) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutpu return out, err } +// DescribeTagsPages iterates over the pages of a DescribeTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTags operation. +// pageNum := 0 +// err := client.DescribeTagsPages(params, +// func(page *DescribeTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeTagsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1031,7 +1776,28 @@ func (c *AutoScaling) DescribeTagsPages(input *DescribeTagsInput, fn func(p *Des const opDescribeTerminationPolicyTypes = "DescribeTerminationPolicyTypes" -// DescribeTerminationPolicyTypesRequest generates a request for the DescribeTerminationPolicyTypes operation. +// DescribeTerminationPolicyTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTerminationPolicyTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTerminationPolicyTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTerminationPolicyTypesRequest method. 
+// req, resp := client.DescribeTerminationPolicyTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DescribeTerminationPolicyTypesRequest(input *DescribeTerminationPolicyTypesInput) (req *request.Request, output *DescribeTerminationPolicyTypesOutput) { op := &request.Operation{ Name: opDescribeTerminationPolicyTypes, @@ -1058,7 +1824,28 @@ func (c *AutoScaling) DescribeTerminationPolicyTypes(input *DescribeTerminationP const opDetachInstances = "DetachInstances" -// DetachInstancesRequest generates a request for the DetachInstances operation. +// DetachInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DetachInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachInstancesRequest method. +// req, resp := client.DetachInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req *request.Request, output *DetachInstancesOutput) { op := &request.Operation{ Name: opDetachInstances, @@ -1095,7 +1882,28 @@ func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInsta const opDetachLoadBalancers = "DetachLoadBalancers" -// DetachLoadBalancersRequest generates a request for the DetachLoadBalancers operation. +// DetachLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DetachLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachLoadBalancersRequest method. +// req, resp := client.DetachLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput) (req *request.Request, output *DetachLoadBalancersOutput) { op := &request.Operation{ Name: opDetachLoadBalancers, @@ -1127,7 +1935,28 @@ func (c *AutoScaling) DetachLoadBalancers(input *DetachLoadBalancersInput) (*Det const opDisableMetricsCollection = "DisableMetricsCollection" -// DisableMetricsCollectionRequest generates a request for the DisableMetricsCollection operation. 
+// DisableMetricsCollectionRequest generates a "aws/request.Request" representing the +// client's request for the DisableMetricsCollection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableMetricsCollection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableMetricsCollectionRequest method. +// req, resp := client.DisableMetricsCollectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) DisableMetricsCollectionRequest(input *DisableMetricsCollectionInput) (req *request.Request, output *DisableMetricsCollectionOutput) { op := &request.Operation{ Name: opDisableMetricsCollection, @@ -1157,7 +1986,28 @@ func (c *AutoScaling) DisableMetricsCollection(input *DisableMetricsCollectionIn const opEnableMetricsCollection = "EnableMetricsCollection" -// EnableMetricsCollectionRequest generates a request for the EnableMetricsCollection operation. +// EnableMetricsCollectionRequest generates a "aws/request.Request" representing the +// client's request for the EnableMetricsCollection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableMetricsCollection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableMetricsCollectionRequest method. +// req, resp := client.EnableMetricsCollectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) EnableMetricsCollectionRequest(input *EnableMetricsCollectionInput) (req *request.Request, output *EnableMetricsCollectionOutput) { op := &request.Operation{ Name: opEnableMetricsCollection, @@ -1190,7 +2040,28 @@ func (c *AutoScaling) EnableMetricsCollection(input *EnableMetricsCollectionInpu const opEnterStandby = "EnterStandby" -// EnterStandbyRequest generates a request for the EnterStandby operation. +// EnterStandbyRequest generates a "aws/request.Request" representing the +// client's request for the EnterStandby operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnterStandby method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnterStandbyRequest method. +// req, resp := client.EnterStandbyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *request.Request, output *EnterStandbyOutput) { op := &request.Operation{ Name: opEnterStandby, @@ -1220,7 +2091,28 @@ func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutpu const opExecutePolicy = "ExecutePolicy" -// ExecutePolicyRequest generates a request for the ExecutePolicy operation. +// ExecutePolicyRequest generates a "aws/request.Request" representing the +// client's request for the ExecutePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExecutePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExecutePolicyRequest method. +// req, resp := client.ExecutePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) ExecutePolicyRequest(input *ExecutePolicyInput) (req *request.Request, output *ExecutePolicyOutput) { op := &request.Operation{ Name: opExecutePolicy, @@ -1249,7 +2141,28 @@ func (c *AutoScaling) ExecutePolicy(input *ExecutePolicyInput) (*ExecutePolicyOu const opExitStandby = "ExitStandby" -// ExitStandbyRequest generates a request for the ExitStandby operation. +// ExitStandbyRequest generates a "aws/request.Request" representing the +// client's request for the ExitStandby operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExitStandby method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExitStandbyRequest method. +// req, resp := client.ExitStandbyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request.Request, output *ExitStandbyOutput) { op := &request.Operation{ Name: opExitStandby, @@ -1279,7 +2192,28 @@ func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, const opPutLifecycleHook = "PutLifecycleHook" -// PutLifecycleHookRequest generates a request for the PutLifecycleHook operation. +// PutLifecycleHookRequest generates a "aws/request.Request" representing the +// client's request for the PutLifecycleHook operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutLifecycleHook method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutLifecycleHookRequest method. +// req, resp := client.PutLifecycleHookRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req *request.Request, output *PutLifecycleHookOutput) { op := &request.Operation{ Name: opPutLifecycleHook, @@ -1330,7 +2264,28 @@ func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecy const opPutNotificationConfiguration = "PutNotificationConfiguration" -// PutNotificationConfigurationRequest generates a request for the PutNotificationConfiguration operation. +// PutNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutNotificationConfigurationRequest method. +// req, resp := client.PutNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotificationConfigurationInput) (req *request.Request, output *PutNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutNotificationConfiguration, @@ -1367,7 +2322,28 @@ func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigu const opPutScalingPolicy = "PutScalingPolicy" -// PutScalingPolicyRequest generates a request for the PutScalingPolicy operation. +// PutScalingPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutScalingPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScalingPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the PutScalingPolicyRequest method. +// req, resp := client.PutScalingPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { op := &request.Operation{ Name: opPutScalingPolicy, @@ -1402,7 +2378,28 @@ func (c *AutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalin const opPutScheduledUpdateGroupAction = "PutScheduledUpdateGroupAction" -// PutScheduledUpdateGroupActionRequest generates a request for the PutScheduledUpdateGroupAction operation. +// PutScheduledUpdateGroupActionRequest generates a "aws/request.Request" representing the +// client's request for the PutScheduledUpdateGroupAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutScheduledUpdateGroupAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutScheduledUpdateGroupActionRequest method. +// req, resp := client.PutScheduledUpdateGroupActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUpdateGroupActionInput) (req *request.Request, output *PutScheduledUpdateGroupActionOutput) { op := &request.Operation{ Name: opPutScheduledUpdateGroupAction, @@ -1436,7 +2433,28 @@ func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGro const opRecordLifecycleActionHeartbeat = "RecordLifecycleActionHeartbeat" -// RecordLifecycleActionHeartbeatRequest generates a request for the RecordLifecycleActionHeartbeat operation. +// RecordLifecycleActionHeartbeatRequest generates a "aws/request.Request" representing the +// client's request for the RecordLifecycleActionHeartbeat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RecordLifecycleActionHeartbeat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RecordLifecycleActionHeartbeatRequest method. 
+// req, resp := client.RecordLifecycleActionHeartbeatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecycleActionHeartbeatInput) (req *request.Request, output *RecordLifecycleActionHeartbeatOutput) { op := &request.Operation{ Name: opRecordLifecycleActionHeartbeat, @@ -1480,7 +2498,28 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActio const opResumeProcesses = "ResumeProcesses" -// ResumeProcessesRequest generates a request for the ResumeProcesses operation. +// ResumeProcessesRequest generates a "aws/request.Request" representing the +// client's request for the ResumeProcesses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResumeProcesses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResumeProcessesRequest method. +// req, resp := client.ResumeProcessesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *ResumeProcessesOutput) { op := &request.Operation{ Name: opResumeProcesses, @@ -1514,7 +2553,28 @@ func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProces const opSetDesiredCapacity = "SetDesiredCapacity" -// SetDesiredCapacityRequest generates a request for the SetDesiredCapacity operation. +// SetDesiredCapacityRequest generates a "aws/request.Request" representing the +// client's request for the SetDesiredCapacity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDesiredCapacity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDesiredCapacityRequest method. +// req, resp := client.SetDesiredCapacityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) (req *request.Request, output *SetDesiredCapacityOutput) { op := &request.Operation{ Name: opSetDesiredCapacity, @@ -1546,7 +2606,28 @@ func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDe const opSetInstanceHealth = "SetInstanceHealth" -// SetInstanceHealthRequest generates a request for the SetInstanceHealth operation. +// SetInstanceHealthRequest generates a "aws/request.Request" representing the +// client's request for the SetInstanceHealth operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetInstanceHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetInstanceHealthRequest method. +// req, resp := client.SetInstanceHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (req *request.Request, output *SetInstanceHealthOutput) { op := &request.Operation{ Name: opSetInstanceHealth, @@ -1578,7 +2659,28 @@ func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInst const opSetInstanceProtection = "SetInstanceProtection" -// SetInstanceProtectionRequest generates a request for the SetInstanceProtection operation. +// SetInstanceProtectionRequest generates a "aws/request.Request" representing the +// client's request for the SetInstanceProtection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetInstanceProtection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetInstanceProtectionRequest method. +// req, resp := client.SetInstanceProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionInput) (req *request.Request, output *SetInstanceProtectionOutput) { op := &request.Operation{ Name: opSetInstanceProtection, @@ -1608,7 +2710,28 @@ func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) ( const opSuspendProcesses = "SuspendProcesses" -// SuspendProcessesRequest generates a request for the SuspendProcesses operation. +// SuspendProcessesRequest generates a "aws/request.Request" representing the +// client's request for the SuspendProcesses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SuspendProcesses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SuspendProcessesRequest method. 
+// req, resp := client.SuspendProcessesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req *request.Request, output *SuspendProcessesOutput) { op := &request.Operation{ Name: opSuspendProcesses, @@ -1647,7 +2770,28 @@ func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProc const opTerminateInstanceInAutoScalingGroup = "TerminateInstanceInAutoScalingGroup" -// TerminateInstanceInAutoScalingGroupRequest generates a request for the TerminateInstanceInAutoScalingGroup operation. +// TerminateInstanceInAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the TerminateInstanceInAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateInstanceInAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateInstanceInAutoScalingGroupRequest method. +// req, resp := client.TerminateInstanceInAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) TerminateInstanceInAutoScalingGroupRequest(input *TerminateInstanceInAutoScalingGroupInput) (req *request.Request, output *TerminateInstanceInAutoScalingGroupOutput) { op := &request.Operation{ Name: opTerminateInstanceInAutoScalingGroup, @@ -1678,7 +2822,28 @@ func (c *AutoScaling) TerminateInstanceInAutoScalingGroup(input *TerminateInstan const opUpdateAutoScalingGroup = "UpdateAutoScalingGroup" -// UpdateAutoScalingGroupRequest generates a request for the UpdateAutoScalingGroup operation. +// UpdateAutoScalingGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAutoScalingGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAutoScalingGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAutoScalingGroupRequest method. 
+// req, resp := client.UpdateAutoScalingGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *AutoScaling) UpdateAutoScalingGroupRequest(input *UpdateAutoScalingGroupInput) (req *request.Request, output *UpdateAutoScalingGroupOutput) { op := &request.Operation{ Name: opUpdateAutoScalingGroup, diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go index bb58f66ed..e529e4de8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Auto Scaling is designed to automatically launch or terminate EC2 instances @@ -60,7 +60,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go index a40c1f263..5ec08091c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -14,7 +14,28 @@ import ( const opCancelUpdateStack = "CancelUpdateStack" -// CancelUpdateStackRequest generates a request for the CancelUpdateStack operation. +// CancelUpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the CancelUpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelUpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelUpdateStackRequest method. +// req, resp := client.CancelUpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) CancelUpdateStackRequest(input *CancelUpdateStackInput) (req *request.Request, output *CancelUpdateStackOutput) { op := &request.Operation{ Name: opCancelUpdateStack, @@ -37,7 +58,7 @@ func (c *CloudFormation) CancelUpdateStackRequest(input *CancelUpdateStackInput) // Cancels an update on the specified stack. If the call completes successfully, // the stack rolls back the update and reverts to the previous stack configuration. // -// You can cancel only stacks that are in the UPDATE_IN_PROGRESS state. +// You can cancel only stacks that are in the UPDATE_IN_PROGRESS state. 
func (c *CloudFormation) CancelUpdateStack(input *CancelUpdateStackInput) (*CancelUpdateStackOutput, error) { req, out := c.CancelUpdateStackRequest(input) err := req.Send() @@ -46,7 +67,28 @@ func (c *CloudFormation) CancelUpdateStack(input *CancelUpdateStackInput) (*Canc const opContinueUpdateRollback = "ContinueUpdateRollback" -// ContinueUpdateRollbackRequest generates a request for the ContinueUpdateRollback operation. +// ContinueUpdateRollbackRequest generates a "aws/request.Request" representing the +// client's request for the ContinueUpdateRollback operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ContinueUpdateRollback method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ContinueUpdateRollbackRequest method. +// req, resp := client.ContinueUpdateRollbackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ContinueUpdateRollbackRequest(input *ContinueUpdateRollbackInput) (req *request.Request, output *ContinueUpdateRollbackOutput) { op := &request.Operation{ Name: opContinueUpdateRollback, @@ -85,7 +127,28 @@ func (c *CloudFormation) ContinueUpdateRollback(input *ContinueUpdateRollbackInp const opCreateChangeSet = "CreateChangeSet" -// CreateChangeSetRequest generates a request for the CreateChangeSet operation. +// CreateChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateChangeSetRequest method. +// req, resp := client.CreateChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) CreateChangeSetRequest(input *CreateChangeSetInput) (req *request.Request, output *CreateChangeSetOutput) { op := &request.Operation{ Name: opCreateChangeSet, @@ -125,7 +188,28 @@ func (c *CloudFormation) CreateChangeSet(input *CreateChangeSetInput) (*CreateCh const opCreateStack = "CreateStack" -// CreateStackRequest generates a request for the CreateStack operation. +// CreateStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStackRequest method. +// req, resp := client.CreateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { op := &request.Operation{ Name: opCreateStack, @@ -154,7 +238,28 @@ func (c *CloudFormation) CreateStack(input *CreateStackInput) (*CreateStackOutpu const opDeleteChangeSet = "DeleteChangeSet" -// DeleteChangeSetRequest generates a request for the DeleteChangeSet operation. +// DeleteChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteChangeSetRequest method. +// req, resp := client.DeleteChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DeleteChangeSetRequest(input *DeleteChangeSetInput) (req *request.Request, output *DeleteChangeSetOutput) { op := &request.Operation{ Name: opDeleteChangeSet, @@ -185,7 +290,28 @@ func (c *CloudFormation) DeleteChangeSet(input *DeleteChangeSetInput) (*DeleteCh const opDeleteStack = "DeleteStack" -// DeleteStackRequest generates a request for the DeleteStack operation. +// DeleteStackRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackRequest method. 
+// req, resp := client.DeleteStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { op := &request.Operation{ Name: opDeleteStack, @@ -216,7 +342,28 @@ func (c *CloudFormation) DeleteStack(input *DeleteStackInput) (*DeleteStackOutpu const opDescribeAccountLimits = "DescribeAccountLimits" -// DescribeAccountLimitsRequest generates a request for the DescribeAccountLimits operation. +// DescribeAccountLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountLimitsRequest method. +// req, resp := client.DescribeAccountLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeAccountLimitsRequest(input *DescribeAccountLimitsInput) (req *request.Request, output *DescribeAccountLimitsOutput) { op := &request.Operation{ Name: opDescribeAccountLimits, @@ -244,7 +391,28 @@ func (c *CloudFormation) DescribeAccountLimits(input *DescribeAccountLimitsInput const opDescribeChangeSet = "DescribeChangeSet" -// DescribeChangeSetRequest generates a request for the DescribeChangeSet operation. +// DescribeChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeChangeSetRequest method. +// req, resp := client.DescribeChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeChangeSetRequest(input *DescribeChangeSetInput) (req *request.Request, output *DescribeChangeSetOutput) { op := &request.Operation{ Name: opDescribeChangeSet, @@ -274,7 +442,28 @@ func (c *CloudFormation) DescribeChangeSet(input *DescribeChangeSetInput) (*Desc const opDescribeStackEvents = "DescribeStackEvents" -// DescribeStackEventsRequest generates a request for the DescribeStackEvents operation. +// DescribeStackEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackEvents operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackEventsRequest method. +// req, resp := client.DescribeStackEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeStackEventsRequest(input *DescribeStackEventsInput) (req *request.Request, output *DescribeStackEventsOutput) { op := &request.Operation{ Name: opDescribeStackEvents, @@ -298,18 +487,35 @@ func (c *CloudFormation) DescribeStackEventsRequest(input *DescribeStackEventsIn return } -// Returns all stack related events for a specified stack. For more information -// about a stack's event history, go to Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html) +// Returns all stack related events for a specified stack in reverse chronological +// order. For more information about a stack's event history, go to Stacks (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/concept-stack.html) // in the AWS CloudFormation User Guide. // -// You can list events for stacks that have failed to create or have been deleted -// by specifying the unique stack identifier (stack ID). +// You can list events for stacks that have failed to create or have been +// deleted by specifying the unique stack identifier (stack ID). func (c *CloudFormation) DescribeStackEvents(input *DescribeStackEventsInput) (*DescribeStackEventsOutput, error) { req, out := c.DescribeStackEventsRequest(input) err := req.Send() return out, err } +// DescribeStackEventsPages iterates over the pages of a DescribeStackEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStackEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStackEvents operation. +// pageNum := 0 +// err := client.DescribeStackEventsPages(params, +// func(page *DescribeStackEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFormation) DescribeStackEventsPages(input *DescribeStackEventsInput, fn func(p *DescribeStackEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeStackEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -320,7 +526,28 @@ func (c *CloudFormation) DescribeStackEventsPages(input *DescribeStackEventsInpu const opDescribeStackResource = "DescribeStackResource" -// DescribeStackResourceRequest generates a request for the DescribeStackResource operation. +// DescribeStackResourceRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackResource operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackResourceRequest method. +// req, resp := client.DescribeStackResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeStackResourceRequest(input *DescribeStackResourceInput) (req *request.Request, output *DescribeStackResourceOutput) { op := &request.Operation{ Name: opDescribeStackResource, @@ -350,7 +577,28 @@ func (c *CloudFormation) DescribeStackResource(input *DescribeStackResourceInput const opDescribeStackResources = "DescribeStackResources" -// DescribeStackResourcesRequest generates a request for the DescribeStackResources operation. +// DescribeStackResourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackResourcesRequest method. +// req, resp := client.DescribeStackResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeStackResourcesRequest(input *DescribeStackResourcesInput) (req *request.Request, output *DescribeStackResourcesOutput) { op := &request.Operation{ Name: opDescribeStackResources, @@ -373,17 +621,18 @@ func (c *CloudFormation) DescribeStackResourcesRequest(input *DescribeStackResou // returned. If PhysicalResourceId is specified, the associated resources of // the stack that the resource belongs to are returned. // -// Only the first 100 resources will be returned. If your stack has more resources -// than this, you should use ListStackResources instead. For deleted stacks, -// DescribeStackResources returns resource information for up to 90 days after -// the stack has been deleted. +// Only the first 100 resources will be returned. If your stack has more resources +// than this, you should use ListStackResources instead. +// +// For deleted stacks, DescribeStackResources returns resource information +// for up to 90 days after the stack has been deleted. // // You must specify either StackName or PhysicalResourceId, but not both. In // addition, you can specify LogicalResourceId to filter the returned result. 
// For more information about resources, the LogicalResourceId and PhysicalResourceId, // go to the AWS CloudFormation User Guide (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/). // -// A ValidationError is returned if you specify both StackName and PhysicalResourceId +// A ValidationError is returned if you specify both StackName and PhysicalResourceId // in the same request. func (c *CloudFormation) DescribeStackResources(input *DescribeStackResourcesInput) (*DescribeStackResourcesOutput, error) { req, out := c.DescribeStackResourcesRequest(input) @@ -393,7 +642,28 @@ func (c *CloudFormation) DescribeStackResources(input *DescribeStackResourcesInp const opDescribeStacks = "DescribeStacks" -// DescribeStacksRequest generates a request for the DescribeStacks operation. +// DescribeStacksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStacksRequest method. +// req, resp := client.DescribeStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { op := &request.Operation{ Name: opDescribeStacks, @@ -425,6 +695,23 @@ func (c *CloudFormation) DescribeStacks(input *DescribeStacksInput) (*DescribeSt return out, err } +// DescribeStacksPages iterates over the pages of a DescribeStacks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStacks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStacks operation. +// pageNum := 0 +// err := client.DescribeStacksPages(params, +// func(page *DescribeStacksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFormation) DescribeStacksPages(input *DescribeStacksInput, fn func(p *DescribeStacksOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeStacksRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -435,7 +722,28 @@ func (c *CloudFormation) DescribeStacksPages(input *DescribeStacksInput, fn func const opEstimateTemplateCost = "EstimateTemplateCost" -// EstimateTemplateCostRequest generates a request for the EstimateTemplateCost operation. +// EstimateTemplateCostRequest generates a "aws/request.Request" representing the +// client's request for the EstimateTemplateCost operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EstimateTemplateCost method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EstimateTemplateCostRequest method. +// req, resp := client.EstimateTemplateCostRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) EstimateTemplateCostRequest(input *EstimateTemplateCostInput) (req *request.Request, output *EstimateTemplateCostOutput) { op := &request.Operation{ Name: opEstimateTemplateCost, @@ -464,7 +772,28 @@ func (c *CloudFormation) EstimateTemplateCost(input *EstimateTemplateCostInput) const opExecuteChangeSet = "ExecuteChangeSet" -// ExecuteChangeSetRequest generates a request for the ExecuteChangeSet operation. +// ExecuteChangeSetRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteChangeSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ExecuteChangeSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ExecuteChangeSetRequest method. +// req, resp := client.ExecuteChangeSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ExecuteChangeSetRequest(input *ExecuteChangeSetInput) (req *request.Request, output *ExecuteChangeSetOutput) { op := &request.Operation{ Name: opExecuteChangeSet, @@ -502,7 +831,28 @@ func (c *CloudFormation) ExecuteChangeSet(input *ExecuteChangeSetInput) (*Execut const opGetStackPolicy = "GetStackPolicy" -// GetStackPolicyRequest generates a request for the GetStackPolicy operation. +// GetStackPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetStackPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStackPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStackPolicyRequest method. 
+// req, resp := client.GetStackPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) GetStackPolicyRequest(input *GetStackPolicyInput) (req *request.Request, output *GetStackPolicyOutput) { op := &request.Operation{ Name: opGetStackPolicy, @@ -530,7 +880,28 @@ func (c *CloudFormation) GetStackPolicy(input *GetStackPolicyInput) (*GetStackPo const opGetTemplate = "GetTemplate" -// GetTemplateRequest generates a request for the GetTemplate operation. +// GetTemplateRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTemplateRequest method. +// req, resp := client.GetTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) GetTemplateRequest(input *GetTemplateInput) (req *request.Request, output *GetTemplateOutput) { op := &request.Operation{ Name: opGetTemplate, @@ -554,7 +925,7 @@ func (c *CloudFormation) GetTemplateRequest(input *GetTemplateInput) (req *reque // For deleted stacks, GetTemplate returns the template for up to 90 days after // the stack has been deleted. // -// If the template does not exist, a ValidationError is returned. +// If the template does not exist, a ValidationError is returned. func (c *CloudFormation) GetTemplate(input *GetTemplateInput) (*GetTemplateOutput, error) { req, out := c.GetTemplateRequest(input) err := req.Send() @@ -563,7 +934,28 @@ func (c *CloudFormation) GetTemplate(input *GetTemplateInput) (*GetTemplateOutpu const opGetTemplateSummary = "GetTemplateSummary" -// GetTemplateSummaryRequest generates a request for the GetTemplateSummary operation. +// GetTemplateSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetTemplateSummary operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTemplateSummary method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTemplateSummaryRequest method. 
+// req, resp := client.GetTemplateSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) GetTemplateSummaryRequest(input *GetTemplateSummaryInput) (req *request.Request, output *GetTemplateSummaryOutput) { op := &request.Operation{ Name: opGetTemplateSummary, @@ -599,7 +991,28 @@ func (c *CloudFormation) GetTemplateSummary(input *GetTemplateSummaryInput) (*Ge const opListChangeSets = "ListChangeSets" -// ListChangeSetsRequest generates a request for the ListChangeSets operation. +// ListChangeSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeSetsRequest method. +// req, resp := client.ListChangeSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ListChangeSetsRequest(input *ListChangeSetsInput) (req *request.Request, output *ListChangeSetsOutput) { op := &request.Operation{ Name: opListChangeSets, @@ -628,7 +1041,28 @@ func (c *CloudFormation) ListChangeSets(input *ListChangeSetsInput) (*ListChange const opListStackResources = "ListStackResources" -// ListStackResourcesRequest generates a request for the ListStackResources operation. +// ListStackResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListStackResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStackResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStackResourcesRequest method. +// req, resp := client.ListStackResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ListStackResourcesRequest(input *ListStackResourcesInput) (req *request.Request, output *ListStackResourcesOutput) { op := &request.Operation{ Name: opListStackResources, @@ -662,6 +1096,23 @@ func (c *CloudFormation) ListStackResources(input *ListStackResourcesInput) (*Li return out, err } +// ListStackResourcesPages iterates over the pages of a ListStackResources operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStackResources method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStackResources operation. +// pageNum := 0 +// err := client.ListStackResourcesPages(params, +// func(page *ListStackResourcesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFormation) ListStackResourcesPages(input *ListStackResourcesInput, fn func(p *ListStackResourcesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStackResourcesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -672,7 +1123,28 @@ func (c *CloudFormation) ListStackResourcesPages(input *ListStackResourcesInput, const opListStacks = "ListStacks" -// ListStacksRequest generates a request for the ListStacks operation. +// ListStacksRequest generates a "aws/request.Request" representing the +// client's request for the ListStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStacksRequest method. +// req, resp := client.ListStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ListStacksRequest(input *ListStacksInput) (req *request.Request, output *ListStacksOutput) { op := &request.Operation{ Name: opListStacks, @@ -707,6 +1179,23 @@ func (c *CloudFormation) ListStacks(input *ListStacksInput) (*ListStacksOutput, return out, err } +// ListStacksPages iterates over the pages of a ListStacks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStacks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStacks operation. +// pageNum := 0 +// err := client.ListStacksPages(params, +// func(page *ListStacksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFormation) ListStacksPages(input *ListStacksInput, fn func(p *ListStacksOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStacksRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -717,7 +1206,28 @@ func (c *CloudFormation) ListStacksPages(input *ListStacksInput, fn func(p *List const opSetStackPolicy = "SetStackPolicy" -// SetStackPolicyRequest generates a request for the SetStackPolicy operation. +// SetStackPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetStackPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetStackPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetStackPolicyRequest method. +// req, resp := client.SetStackPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) SetStackPolicyRequest(input *SetStackPolicyInput) (req *request.Request, output *SetStackPolicyOutput) { op := &request.Operation{ Name: opSetStackPolicy, @@ -746,7 +1256,28 @@ func (c *CloudFormation) SetStackPolicy(input *SetStackPolicyInput) (*SetStackPo const opSignalResource = "SignalResource" -// SignalResourceRequest generates a request for the SignalResource operation. +// SignalResourceRequest generates a "aws/request.Request" representing the +// client's request for the SignalResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SignalResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SignalResourceRequest method. +// req, resp := client.SignalResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) SignalResourceRequest(input *SignalResourceInput) (req *request.Request, output *SignalResourceOutput) { op := &request.Operation{ Name: opSignalResource, @@ -780,7 +1311,28 @@ func (c *CloudFormation) SignalResource(input *SignalResourceInput) (*SignalReso const opUpdateStack = "UpdateStack" -// UpdateStackRequest generates a request for the UpdateStack operation. +// UpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStackRequest method. 
+// req, resp := client.UpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { op := &request.Operation{ Name: opUpdateStack, @@ -815,7 +1367,28 @@ func (c *CloudFormation) UpdateStack(input *UpdateStackInput) (*UpdateStackOutpu const opValidateTemplate = "ValidateTemplate" -// ValidateTemplateRequest generates a request for the ValidateTemplate operation. +// ValidateTemplateRequest generates a "aws/request.Request" representing the +// client's request for the ValidateTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ValidateTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ValidateTemplateRequest method. +// req, resp := client.ValidateTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFormation) ValidateTemplateRequest(input *ValidateTemplateInput) (req *request.Request, output *ValidateTemplateOutput) { op := &request.Operation{ Name: opValidateTemplate, @@ -947,6 +1520,13 @@ type ChangeSetSummary struct { // Descriptive information about the change set. Description *string `min:"1" type:"string"` + // If the change set execution status is AVAILABLE, you can execute the change + // set. If you can’t execute the change set, the status indicates why. For example, + // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // is still creating it or in an OBSOLETE state because the stack was already + // updated. + ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` + // The ID of the stack with which the change set is associated. StackId *string `type:"string"` @@ -1210,7 +1790,7 @@ type CreateStackInput struct { DisableRollback *bool `type:"boolean"` // The Simple Notification Service (SNS) topic ARNs to publish stack related - // events. You can find your SNS topic ARNs using the SNS console (http://console.aws.amazon.com/sns) + // events. You can find your SNS topic ARNs using the SNS console (https://console.aws.amazon.com/sns) // or your Command Line Interface (CLI). NotificationARNs []*string `type:"list"` @@ -1230,9 +1810,9 @@ type CreateStackInput struct { // create stack action, such as AWS::EC2::Instance, AWS::EC2::*, or Custom::MyCustomInstance. // Use the following syntax to describe template resource types: AWS::* (for // all AWS resource), Custom::* (for all custom resources), Custom::logical_ID - // (for a specific custom resource), AWS::service_name::* (for all resources + // (for a specific custom resource), AWS::service_name::* (for all resources // of a particular AWS service), and AWS::service_name::resource_logical_ID - // (for a specific AWS resource). + // (for a specific AWS resource). // // If the list of resource types doesn't include a resource that you're creating, // the stack creation fails. 
By default, AWS CloudFormation grants permissions @@ -1245,8 +1825,8 @@ type CreateStackInput struct { // The name that is associated with the stack. The name must be unique in the // region in which you are creating the stack. // - // A stack name can contain only alphanumeric characters (case sensitive) and - // hyphens. It must start with an alphabetic character and cannot be longer + // A stack name can contain only alphanumeric characters (case sensitive) + // and hyphens. It must start with an alphabetic character and cannot be longer // than 128 characters. StackName *string `type:"string" required:"true"` @@ -1584,6 +2164,13 @@ type DescribeChangeSetOutput struct { // Information about the change set. Description *string `min:"1" type:"string"` + // If the change set execution status is AVAILABLE, you can execute the change + // set. If you can’t execute the change set, the status indicates why. For example, + // a change set might be in an UNAVAILABLE state because AWS CloudFormation + // is still creating it or in an OBSOLETE state because the stack was already + // updated. + ExecutionStatus *string `type:"string" enum:"ExecutionStatus"` + // If the output exceeds 1 MB, a string that identifies the next page of changes. // If there is no additional page, this value is null. NextToken *string `min:"1" type:"string"` @@ -1637,9 +2224,12 @@ type DescribeStackEventsInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. StackName *string `type:"string"` } @@ -1700,9 +2290,12 @@ type DescribeStackResourceInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. StackName *string `type:"string" required:"true"` } @@ -1777,9 +2370,12 @@ type DescribeStackResourcesInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. // // Required: Conditional. If you do not specify StackName, you must specify // PhysicalResourceId. @@ -1824,9 +2420,12 @@ type DescribeStacksInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. 
Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. StackName *string `type:"string"` } @@ -1875,6 +2474,7 @@ func (s DescribeStacksOutput) GoString() string { return s.String() } +// The input for an EstimateTemplateCost action. type EstimateTemplateCostInput struct { _ struct{} `type:"structure"` @@ -2061,9 +2661,12 @@ type GetTemplateInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. StackName *string `type:"string" required:"true"` } @@ -2291,9 +2894,12 @@ type ListStackResourcesInput struct { // The name or the unique stack ID that is associated with the stack, which // are not always interchangeable: // - // Running stacks: You can specify either the stack's name or its unique stack - // ID. Deleted stacks: You must specify the unique stack ID. Default: There - // is no default value. + // Running stacks: You can specify either the stack's name or its unique + // stack ID. + // + // Deleted stacks: You must specify the unique stack ID. + // + // Default: There is no default value. StackName *string `type:"string" required:"true"` } @@ -2578,17 +3184,23 @@ type ResourceChangeDetail struct { // The group to which the CausingEntity value belongs. There are five entity // groups: // - // ResourceReference entities are Ref intrinsic functions that refer to resources - // in the template, such as { "Ref" : "MyEC2InstanceResource" }. ParameterReference - // entities are Ref intrinsic functions that get template parameter values, - // such as { "Ref" : "MyPasswordParameter" }. ResourceAttribute entities are - // Fn::GetAtt intrinsic functions that get resource attribute values, such as - // { "Fn::GetAtt" : [ "MyEC2InstanceResource", "PublicDnsName" ] }. DirectModification - // entities are changes that are made directly to the template. Automatic entities - // are AWS::CloudFormation::Stack resource types, which are also known as nested - // stacks. If you made no changes to the AWS::CloudFormation::Stack resource, - // AWS CloudFormation sets the ChangeSource to Automatic because the nested - // stack's template might have changed. Changes to a nested stack's template + // ResourceReference entities are Ref intrinsic functions that refer to + // resources in the template, such as { "Ref" : "MyEC2InstanceResource" }. + // + // ParameterReference entities are Ref intrinsic functions that get template + // parameter values, such as { "Ref" : "MyPasswordParameter" }. + // + // ResourceAttribute entities are Fn::GetAtt intrinsic functions that get + // resource attribute values, such as { "Fn::GetAtt" : [ "MyEC2InstanceResource", + // "PublicDnsName" ] }. + // + // DirectModification entities are changes that are made directly to the + // template. + // + // Automatic entities are AWS::CloudFormation::Stack resource types, which + // are also known as nested stacks. 
If you made no changes to the AWS::CloudFormation::Stack + // resource, AWS CloudFormation sets the ChangeSource to Automatic because the + // nested stack's template might have changed. Changes to a nested stack's template // aren't visible to AWS CloudFormation until you run an update on the parent // stack. ChangeSource *string `type:"string" enum:"ChangeSource"` @@ -2809,7 +3421,9 @@ type Stack struct { // Boolean to enable or disable rollback on stack creation failures: // - // true: disable rollback false: enable rollback + // true: disable rollback + // + // false: enable rollback DisableRollback *bool `type:"boolean"` // The time the stack was last updated. This field will only be returned if @@ -3126,7 +3740,7 @@ func (s TemplateParameter) GoString() string { return s.String() } -// The input for UpdateStack action. +// The input for an UpdateStack action. type UpdateStackInput struct { _ struct{} `type:"structure"` @@ -3284,7 +3898,7 @@ func (s *UpdateStackInput) Validate() error { return nil } -// The output for a UpdateStack action. +// The output for an UpdateStack action. type UpdateStackOutput struct { _ struct{} `type:"structure"` @@ -3435,6 +4049,21 @@ const ( EvaluationTypeDynamic = "Dynamic" ) +const ( + // @enum ExecutionStatus + ExecutionStatusUnavailable = "UNAVAILABLE" + // @enum ExecutionStatus + ExecutionStatusAvailable = "AVAILABLE" + // @enum ExecutionStatus + ExecutionStatusExecuteInProgress = "EXECUTE_IN_PROGRESS" + // @enum ExecutionStatus + ExecutionStatusExecuteComplete = "EXECUTE_COMPLETE" + // @enum ExecutionStatus + ExecutionStatusExecuteFailed = "EXECUTE_FAILED" + // @enum ExecutionStatus + ExecutionStatusObsolete = "OBSOLETE" +) + const ( // @enum OnFailure OnFailureDoNothing = "DO_NOTHING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go index 503142b24..bd9d2917e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // AWS CloudFormation enables you to create and manage AWS infrastructure deployments @@ -28,7 +28,7 @@ import ( // // Amazon CloudFormation makes use of other AWS products. If you need additional // technical information about a specific AWS product, you can find the product's -// technical documentation at http://docs.aws.amazon.com/documentation/ (http://docs.aws.amazon.com/documentation/). +// technical documentation at http://docs.aws.amazon.com/ (http://docs.aws.amazon.com/). //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. 
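The new ExecutionStatus field and its enum constants let a caller check whether a change set is executable before acting on it. A rough sketch of that check, not part of the patch, assuming placeholder names and that the ExecuteChangeSet operation from the same CloudFormation API is available in this SDK version:

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudformation"
    )

    func executeIfAvailable() error {
        svc := cloudformation.New(session.New())

        out, err := svc.DescribeChangeSet(&cloudformation.DescribeChangeSetInput{
            ChangeSetName: aws.String("example-change-set"), // placeholder
            StackName:     aws.String("example-stack"),      // placeholder
        })
        if err != nil {
            return err
        }

        // Only AVAILABLE change sets can be executed; any other status explains why not.
        if aws.StringValue(out.ExecutionStatus) != cloudformation.ExecutionStatusAvailable {
            return fmt.Errorf("change set not executable: %s", aws.StringValue(out.ExecutionStatus))
        }

        _, err = svc.ExecuteChangeSet(&cloudformation.ExecuteChangeSetInput{
            ChangeSetName: aws.String("example-change-set"),
            StackName:     aws.String("example-stack"),
        })
        return err
    }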
type CloudFormation struct { @@ -75,7 +75,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index 246909143..f9bd33f6b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -15,7 +15,28 @@ import ( const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_01_28" -// CreateCloudFrontOriginAccessIdentityRequest generates a request for the CreateCloudFrontOriginAccessIdentity operation. +// CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the CreateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.CreateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *CreateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opCreateCloudFrontOriginAccessIdentity, @@ -42,7 +63,28 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFron const opCreateDistribution = "CreateDistribution2016_01_28" -// CreateDistributionRequest generates a request for the CreateDistribution operation. +// CreateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDistributionRequest method. 
+// req, resp := client.CreateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) (req *request.Request, output *CreateDistributionOutput) { op := &request.Operation{ Name: opCreateDistribution, @@ -69,7 +111,28 @@ func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*Create const opCreateInvalidation = "CreateInvalidation2016_01_28" -// CreateInvalidationRequest generates a request for the CreateInvalidation operation. +// CreateInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the CreateInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInvalidationRequest method. +// req, resp := client.CreateInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) (req *request.Request, output *CreateInvalidationOutput) { op := &request.Operation{ Name: opCreateInvalidation, @@ -96,7 +159,28 @@ func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*Create const opCreateStreamingDistribution = "CreateStreamingDistribution2016_01_28" -// CreateStreamingDistributionRequest generates a request for the CreateStreamingDistribution operation. +// CreateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamingDistributionRequest method. 
+// req, resp := client.CreateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDistributionInput) (req *request.Request, output *CreateStreamingDistributionOutput) { op := &request.Operation{ Name: opCreateStreamingDistribution, @@ -123,7 +207,28 @@ func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistribut const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_01_28" -// DeleteCloudFrontOriginAccessIdentityRequest generates a request for the DeleteCloudFrontOriginAccessIdentity operation. +// DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.DeleteCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCloudFrontOriginAccessIdentityInput) (req *request.Request, output *DeleteCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opDeleteCloudFrontOriginAccessIdentity, @@ -152,7 +257,28 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFron const opDeleteDistribution = "DeleteDistribution2016_01_28" -// DeleteDistributionRequest generates a request for the DeleteDistribution operation. +// DeleteDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDistributionRequest method. 
+// req, resp := client.DeleteDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) (req *request.Request, output *DeleteDistributionOutput) { op := &request.Operation{ Name: opDeleteDistribution, @@ -181,7 +307,28 @@ func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*Delete const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_01_28" -// DeleteStreamingDistributionRequest generates a request for the DeleteStreamingDistribution operation. +// DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStreamingDistributionRequest method. +// req, resp := client.DeleteStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDistributionInput) (req *request.Request, output *DeleteStreamingDistributionOutput) { op := &request.Operation{ Name: opDeleteStreamingDistribution, @@ -210,7 +357,28 @@ func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistribut const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_01_28" -// GetCloudFrontOriginAccessIdentityRequest generates a request for the GetCloudFrontOriginAccessIdentity operation. +// GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityRequest method. 
+// req, resp := client.GetCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFrontOriginAccessIdentityInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentity, @@ -237,7 +405,28 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOrigi const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_01_28" -// GetCloudFrontOriginAccessIdentityConfigRequest generates a request for the GetCloudFrontOriginAccessIdentityConfig operation. +// GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCloudFrontOriginAccessIdentityConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCloudFrontOriginAccessIdentityConfigRequest method. +// req, resp := client.GetCloudFrontOriginAccessIdentityConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCloudFrontOriginAccessIdentityConfigInput) (req *request.Request, output *GetCloudFrontOriginAccessIdentityConfigOutput) { op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentityConfig, @@ -264,7 +453,28 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFron const opGetDistribution = "GetDistribution2016_01_28" -// GetDistributionRequest generates a request for the GetDistribution operation. +// GetDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionRequest method. 
+// req, resp := client.GetDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *request.Request, output *GetDistributionOutput) { op := &request.Operation{ Name: opGetDistribution, @@ -291,7 +501,28 @@ func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistribut const opGetDistributionConfig = "GetDistributionConfig2016_01_28" -// GetDistributionConfigRequest generates a request for the GetDistributionConfig operation. +// GetDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDistributionConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDistributionConfigRequest method. +// req, resp := client.GetDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigInput) (req *request.Request, output *GetDistributionConfigOutput) { op := &request.Operation{ Name: opGetDistributionConfig, @@ -318,7 +549,28 @@ func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (* const opGetInvalidation = "GetInvalidation2016_01_28" -// GetInvalidationRequest generates a request for the GetInvalidation operation. +// GetInvalidationRequest generates a "aws/request.Request" representing the +// client's request for the GetInvalidation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetInvalidation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetInvalidationRequest method. +// req, resp := client.GetInvalidationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *request.Request, output *GetInvalidationOutput) { op := &request.Operation{ Name: opGetInvalidation, @@ -345,7 +597,28 @@ func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidat const opGetStreamingDistribution = "GetStreamingDistribution2016_01_28" -// GetStreamingDistributionRequest generates a request for the GetStreamingDistribution operation. 
+// GetStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionRequest method. +// req, resp := client.GetStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistributionInput) (req *request.Request, output *GetStreamingDistributionOutput) { op := &request.Operation{ Name: opGetStreamingDistribution, @@ -372,7 +645,28 @@ func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInp const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_01_28" -// GetStreamingDistributionConfigRequest generates a request for the GetStreamingDistributionConfig operation. +// GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetStreamingDistributionConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetStreamingDistributionConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetStreamingDistributionConfigRequest method. +// req, resp := client.GetStreamingDistributionConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDistributionConfigInput) (req *request.Request, output *GetStreamingDistributionConfigOutput) { op := &request.Operation{ Name: opGetStreamingDistributionConfig, @@ -399,7 +693,28 @@ func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistribut const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_01_28" -// ListCloudFrontOriginAccessIdentitiesRequest generates a request for the ListCloudFrontOriginAccessIdentities operation. +// ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListCloudFrontOriginAccessIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListCloudFrontOriginAccessIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListCloudFrontOriginAccessIdentitiesRequest method. +// req, resp := client.ListCloudFrontOriginAccessIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListCloudFrontOriginAccessIdentitiesInput) (req *request.Request, output *ListCloudFrontOriginAccessIdentitiesOutput) { op := &request.Operation{ Name: opListCloudFrontOriginAccessIdentities, @@ -430,6 +745,23 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentities(input *ListCloudFrontO return out, err } +// ListCloudFrontOriginAccessIdentitiesPages iterates over the pages of a ListCloudFrontOriginAccessIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListCloudFrontOriginAccessIdentities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListCloudFrontOriginAccessIdentities operation. +// pageNum := 0 +// err := client.ListCloudFrontOriginAccessIdentitiesPages(params, +// func(page *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudFrontOriginAccessIdentitiesInput, fn func(p *ListCloudFrontOriginAccessIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListCloudFrontOriginAccessIdentitiesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -440,7 +772,28 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudF const opListDistributions = "ListDistributions2016_01_28" -// ListDistributionsRequest generates a request for the ListDistributions operation. +// ListDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDistributionsRequest method. 
+// req, resp := client.ListDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (req *request.Request, output *ListDistributionsOutput) { op := &request.Operation{ Name: opListDistributions, @@ -471,6 +824,23 @@ func (c *CloudFront) ListDistributions(input *ListDistributionsInput) (*ListDist return out, err } +// ListDistributionsPages iterates over the pages of a ListDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDistributions operation. +// pageNum := 0 +// err := client.ListDistributionsPages(params, +// func(page *ListDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn func(p *ListDistributionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDistributionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -481,7 +851,28 @@ func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn fu const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_01_28" -// ListDistributionsByWebACLIdRequest generates a request for the ListDistributionsByWebACLId operation. +// ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the +// client's request for the ListDistributionsByWebACLId operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDistributionsByWebACLId method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDistributionsByWebACLIdRequest method. +// req, resp := client.ListDistributionsByWebACLIdRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributionsByWebACLIdInput) (req *request.Request, output *ListDistributionsByWebACLIdOutput) { op := &request.Operation{ Name: opListDistributionsByWebACLId, @@ -508,7 +899,28 @@ func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebAC const opListInvalidations = "ListInvalidations2016_01_28" -// ListInvalidationsRequest generates a request for the ListInvalidations operation. +// ListInvalidationsRequest generates a "aws/request.Request" representing the +// client's request for the ListInvalidations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInvalidations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInvalidationsRequest method. +// req, resp := client.ListInvalidationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (req *request.Request, output *ListInvalidationsOutput) { op := &request.Operation{ Name: opListInvalidations, @@ -539,6 +951,23 @@ func (c *CloudFront) ListInvalidations(input *ListInvalidationsInput) (*ListInva return out, err } +// ListInvalidationsPages iterates over the pages of a ListInvalidations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInvalidations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInvalidations operation. +// pageNum := 0 +// err := client.ListInvalidationsPages(params, +// func(page *ListInvalidationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn func(p *ListInvalidationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInvalidationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -549,7 +978,28 @@ func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn fu const opListStreamingDistributions = "ListStreamingDistributions2016_01_28" -// ListStreamingDistributionsRequest generates a request for the ListStreamingDistributions operation. +// ListStreamingDistributionsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreamingDistributions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStreamingDistributions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStreamingDistributionsRequest method. 
+// req, resp := client.ListStreamingDistributionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistributionsInput) (req *request.Request, output *ListStreamingDistributionsOutput) { op := &request.Operation{ Name: opListStreamingDistributions, @@ -580,6 +1030,23 @@ func (c *CloudFront) ListStreamingDistributions(input *ListStreamingDistribution return out, err } +// ListStreamingDistributionsPages iterates over the pages of a ListStreamingDistributions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreamingDistributions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreamingDistributions operation. +// pageNum := 0 +// err := client.ListStreamingDistributionsPages(params, +// func(page *ListStreamingDistributionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistributionsInput, fn func(p *ListStreamingDistributionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStreamingDistributionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -590,7 +1057,28 @@ func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistrib const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_01_28" -// UpdateCloudFrontOriginAccessIdentityRequest generates a request for the UpdateCloudFrontOriginAccessIdentity operation. +// UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the +// client's request for the UpdateCloudFrontOriginAccessIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateCloudFrontOriginAccessIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateCloudFrontOriginAccessIdentityRequest method. +// req, resp := client.UpdateCloudFrontOriginAccessIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCloudFrontOriginAccessIdentityInput) (req *request.Request, output *UpdateCloudFrontOriginAccessIdentityOutput) { op := &request.Operation{ Name: opUpdateCloudFrontOriginAccessIdentity, @@ -617,7 +1105,28 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFron const opUpdateDistribution = "UpdateDistribution2016_01_28" -// UpdateDistributionRequest generates a request for the UpdateDistribution operation. 
+// UpdateDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDistributionRequest method. +// req, resp := client.UpdateDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) (req *request.Request, output *UpdateDistributionOutput) { op := &request.Operation{ Name: opUpdateDistribution, @@ -644,7 +1153,28 @@ func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*Update const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_01_28" -// UpdateStreamingDistributionRequest generates a request for the UpdateStreamingDistribution operation. +// UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStreamingDistribution operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStreamingDistribution method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStreamingDistributionRequest method. +// req, resp := client.UpdateStreamingDistributionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDistributionInput) (req *request.Request, output *UpdateStreamingDistributionOutput) { op := &request.Operation{ Name: opUpdateStreamingDistribution, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index 51b73c6ef..99d23c72e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // CloudFront is a client for CloudFront. 
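The CloudFront client above also gains *Pages helpers (ListCloudFrontOriginAccessIdentitiesPages, ListDistributionsPages, ListInvalidationsPages, ListStreamingDistributionsPages). A small sketch of paging through distributions, not part of the patch, assuming default credentials; the printed field is only illustrative:

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/cloudfront"
    )

    func printAllDistributions() error {
        svc := cloudfront.New(session.New())

        // The callback runs once per page; returning false stops iteration early.
        return svc.ListDistributionsPages(&cloudfront.ListDistributionsInput{},
            func(page *cloudfront.ListDistributionsOutput, lastPage bool) bool {
                for _, d := range page.DistributionList.Items {
                    fmt.Println(aws.StringValue(d.DomainName))
                }
                return true // keep going until the last page
            })
    }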
@@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go index 7fc320b74..de18a11ca 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/api.go @@ -13,7 +13,28 @@ import ( const opAddTags = "AddTags" -// AddTagsRequest generates a request for the AddTags operation. +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { op := &request.Operation{ Name: opAddTags, @@ -45,7 +66,28 @@ func (c *CloudTrail) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { const opCreateTrail = "CreateTrail" -// CreateTrailRequest generates a request for the CreateTrail operation. +// CreateTrailRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrailRequest method. +// req, resp := client.CreateTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) CreateTrailRequest(input *CreateTrailInput) (req *request.Request, output *CreateTrailOutput) { op := &request.Operation{ Name: opCreateTrail, @@ -74,7 +116,28 @@ func (c *CloudTrail) CreateTrail(input *CreateTrailInput) (*CreateTrailOutput, e const opDeleteTrail = "DeleteTrail" -// DeleteTrailRequest generates a request for the DeleteTrail operation. +// DeleteTrailRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrail operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrailRequest method. +// req, resp := client.DeleteTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) DeleteTrailRequest(input *DeleteTrailInput) (req *request.Request, output *DeleteTrailOutput) { op := &request.Operation{ Name: opDeleteTrail, @@ -103,7 +166,28 @@ func (c *CloudTrail) DeleteTrail(input *DeleteTrailInput) (*DeleteTrailOutput, e const opDescribeTrails = "DescribeTrails" -// DescribeTrailsRequest generates a request for the DescribeTrails operation. +// DescribeTrailsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTrails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTrailsRequest method. +// req, resp := client.DescribeTrailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) DescribeTrailsRequest(input *DescribeTrailsInput) (req *request.Request, output *DescribeTrailsOutput) { op := &request.Operation{ Name: opDescribeTrails, @@ -131,7 +215,28 @@ func (c *CloudTrail) DescribeTrails(input *DescribeTrailsInput) (*DescribeTrails const opGetTrailStatus = "GetTrailStatus" -// GetTrailStatusRequest generates a request for the GetTrailStatus operation. +// GetTrailStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetTrailStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrailStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrailStatusRequest method. 
+// req, resp := client.GetTrailStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) GetTrailStatusRequest(input *GetTrailStatusInput) (req *request.Request, output *GetTrailStatusOutput) { op := &request.Operation{ Name: opGetTrailStatus, @@ -162,7 +267,28 @@ func (c *CloudTrail) GetTrailStatus(input *GetTrailStatusInput) (*GetTrailStatus const opListPublicKeys = "ListPublicKeys" -// ListPublicKeysRequest generates a request for the ListPublicKeys operation. +// ListPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListPublicKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPublicKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPublicKeysRequest method. +// req, resp := client.ListPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *request.Request, output *ListPublicKeysOutput) { op := &request.Operation{ Name: opListPublicKeys, @@ -184,7 +310,7 @@ func (c *CloudTrail) ListPublicKeysRequest(input *ListPublicKeysInput) (req *req // within the specified time range. The public key is needed to validate digest // files that were signed with its corresponding private key. // -// CloudTrail uses different private/public key pairs per region. Each digest +// CloudTrail uses different private/public key pairs per region. Each digest // file is signed with a private key unique to its region. Therefore, when you // validate a digest file from a particular region, you must look in the same // region for its corresponding public key. @@ -196,7 +322,28 @@ func (c *CloudTrail) ListPublicKeys(input *ListPublicKeysInput) (*ListPublicKeys const opListTags = "ListTags" -// ListTagsRequest generates a request for the ListTags operation. +// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsRequest method. 
+// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { op := &request.Operation{ Name: opListTags, @@ -214,8 +361,6 @@ func (c *CloudTrail) ListTagsRequest(input *ListTagsInput) (req *request.Request return } -// Lists the tags for the specified trail or trails in the current region. -// // Lists the tags for the trail in the current region. func (c *CloudTrail) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { req, out := c.ListTagsRequest(input) @@ -225,7 +370,28 @@ func (c *CloudTrail) ListTags(input *ListTagsInput) (*ListTagsOutput, error) { const opLookupEvents = "LookupEvents" -// LookupEventsRequest generates a request for the LookupEvents operation. +// LookupEventsRequest generates a "aws/request.Request" representing the +// client's request for the LookupEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the LookupEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the LookupEventsRequest method. +// req, resp := client.LookupEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request.Request, output *LookupEventsOutput) { op := &request.Operation{ Name: opLookupEvents, @@ -254,10 +420,11 @@ func (c *CloudTrail) LookupEventsRequest(input *LookupEventsInput) (req *request // 50 possible. The response includes a token that you can use to get the next // page of results. // -// The rate of lookup requests is limited to one per second per account. If -// this limit is exceeded, a throttling error occurs. Events that occurred -// during the selected time range will not be available for lookup if CloudTrail -// logging was not enabled when the events occurred. +// The rate of lookup requests is limited to one per second per account. If +// this limit is exceeded, a throttling error occurs. +// +// Events that occurred during the selected time range will not be available +// for lookup if CloudTrail logging was not enabled when the events occurred. func (c *CloudTrail) LookupEvents(input *LookupEventsInput) (*LookupEventsOutput, error) { req, out := c.LookupEventsRequest(input) err := req.Send() @@ -266,7 +433,28 @@ func (c *CloudTrail) LookupEvents(input *LookupEventsInput) (*LookupEventsOutput const opRemoveTags = "RemoveTags" -// RemoveTagsRequest generates a request for the RemoveTags operation. +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
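The LookupEvents note above caps lookup requests at one per second per account. The sketch below pages through results while staying under that limit; the one-second interval comes from the text, the Events, NextToken and EventName field names come from the generated output types, and everything else (session setup, printing) is assumed for illustration.

package main

import (
    "fmt"
    "log"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudtrail"
)

func main() {
    svc := cloudtrail.New(session.New())
    input := &cloudtrail.LookupEventsInput{}
    tick := time.Tick(time.Second) // stay under the documented 1 req/s lookup limit

    for {
        <-tick
        out, err := svc.LookupEvents(input)
        if err != nil {
            log.Fatal(err)
        }
        for _, ev := range out.Events {
            fmt.Println(aws.StringValue(ev.EventName))
        }
        if out.NextToken == nil {
            break // no more pages
        }
        input.NextToken = out.NextToken
    }
}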
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { op := &request.Operation{ Name: opRemoveTags, @@ -293,7 +481,28 @@ func (c *CloudTrail) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, erro const opStartLogging = "StartLogging" -// StartLoggingRequest generates a request for the StartLogging operation. +// StartLoggingRequest generates a "aws/request.Request" representing the +// client's request for the StartLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartLoggingRequest method. +// req, resp := client.StartLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) StartLoggingRequest(input *StartLoggingInput) (req *request.Request, output *StartLoggingOutput) { op := &request.Operation{ Name: opStartLogging, @@ -324,7 +533,28 @@ func (c *CloudTrail) StartLogging(input *StartLoggingInput) (*StartLoggingOutput const opStopLogging = "StopLogging" -// StopLoggingRequest generates a request for the StopLogging operation. +// StopLoggingRequest generates a "aws/request.Request" representing the +// client's request for the StopLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopLoggingRequest method. 
+// req, resp := client.StopLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) StopLoggingRequest(input *StopLoggingInput) (req *request.Request, output *StopLoggingOutput) { op := &request.Operation{ Name: opStopLogging, @@ -357,7 +587,28 @@ func (c *CloudTrail) StopLogging(input *StopLoggingInput) (*StopLoggingOutput, e const opUpdateTrail = "UpdateTrail" -// UpdateTrailRequest generates a request for the UpdateTrail operation. +// UpdateTrailRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrailRequest method. +// req, resp := client.UpdateTrailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudTrail) UpdateTrailRequest(input *UpdateTrailInput) (req *request.Request, output *UpdateTrailOutput) { op := &request.Operation{ Name: opUpdateTrail, @@ -392,7 +643,9 @@ type AddTagsInput struct { _ struct{} `type:"structure"` // Specifies the ARN of the trail to which one or more tags will be added. The - // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail ResourceId *string `type:"string" required:"true"` // Contains a list of CloudTrail tags, up to a limit of 10. @@ -464,7 +717,7 @@ type CreateTrailInput struct { // Specifies whether log file integrity validation is enabled. The default is // false. // - // When you disable log file integrity validation, the chain of digest files + // When you disable log file integrity validation, the chain of digest files // is broken after one hour. CloudTrail will not create digest files for log // files that were delivered during a period in which log file integrity validation // was disabled. For example, if you enable log file integrity validation at @@ -488,18 +741,28 @@ type CreateTrailInput struct { // // Examples: // - // alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // 12345678-1234-1234-1234-123456789012 + // alias/MyAliasName + // + // arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // 12345678-1234-1234-1234-123456789012 KmsKeyId *string `type:"string"` // Specifies the name of the trail. The name must meet the following requirements: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-) Start with a letter or number, and end with a letter or - // number Be between 3 and 128 characters Have no adjacent periods, underscores - // or dashes. Names like my-_namespace and my--namespace are invalid. 
Not be - // in IP address format (for example, 192.168.5.4) + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-) + // + // Start with a letter or number, and end with a letter or number + // + // Be between 3 and 128 characters + // + // Have no adjacent periods, underscores or dashes. Names like my-_namespace + // and my--namespace are invalid. + // + // Not be in IP address format (for example, 192.168.5.4) Name *string `type:"string" required:"true"` // Specifies the name of the Amazon S3 bucket designated for publishing log @@ -566,7 +829,7 @@ type CreateTrailOutput struct { // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. // The value is a fully specified ARN to a KMS key in the format: // - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 KmsKeyId *string `type:"string"` // Specifies whether log file integrity validation is enabled. @@ -584,11 +847,19 @@ type CreateTrailOutput struct { // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). S3KeyPrefix *string `type:"string"` - // Specifies the name of the Amazon SNS topic defined for notification of log - // file delivery. - SnsTopicName *string `type:"string"` + // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + // when log files are delivered. The format of a topic ARN is: + // + // arn:aws:sns:us-east-1:123456789012:MyTopic + SnsTopicARN *string `type:"string"` - // Specifies the ARN of the trail that was created. + // This field is deprecated. Use SnsTopicARN. + SnsTopicName *string `deprecated:"true" type:"string"` + + // Specifies the ARN of the trail that was created. The format of a trail ARN + // is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail TrailARN *string `type:"string"` } @@ -607,7 +878,9 @@ type DeleteTrailInput struct { _ struct{} `type:"structure"` // Specifies the name or the CloudTrail ARN of the trail to be deleted. The - // format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail Name *string `type:"string" required:"true"` } @@ -660,18 +933,24 @@ type DescribeTrailsInput struct { IncludeShadowTrails *bool `locationName:"includeShadowTrails" type:"boolean"` // Specifies a list of trail names, trail ARNs, or both, of the trails to describe. - // The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. - // If an empty list is specified, information for the trail in the current region - // is returned. + // The format of a trail ARN is: // - // If an empty list is specified and IncludeShadowTrails is false, then information - // for all trails in the current region is returned. If an empty list is specified - // and IncludeShadowTrails is null or true, then information for all trails - // in the current region and any associated shadow trails in other regions is - // returned. If one or more trail names are specified, information is returned - // only if the names match the names of trails belonging only to the current - // region. To return information about a trail in another region, you must specify - // its trail ARN. 
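The CreateTrailInput hunk above lists the requirements a trail name must satisfy. CloudTrail enforces these server-side; purely as an illustration of the listed rules, a client-side pre-check might look like the sketch below. The helper name, regular expressions, and sample inputs are assumptions, not part of the SDK or this patch.

package main

import (
    "fmt"
    "net"
    "regexp"
)

var (
    validChars   = regexp.MustCompile(`^[a-zA-Z0-9._-]{3,128}$`)
    startEnd     = regexp.MustCompile(`^[a-zA-Z0-9].*[a-zA-Z0-9]$`)
    adjacentPunc = regexp.MustCompile(`[._-]{2}`)
)

// validTrailName checks the documented trail name rules in order.
func validTrailName(name string) error {
    switch {
    case !validChars.MatchString(name):
        return fmt.Errorf("%q: only a-z, A-Z, 0-9, '.', '_', '-' and 3-128 characters allowed", name)
    case !startEnd.MatchString(name):
        return fmt.Errorf("%q: must start and end with a letter or number", name)
    case adjacentPunc.MatchString(name):
        return fmt.Errorf("%q: no adjacent '.', '_' or '-' (names like my-_namespace are invalid)", name)
    case net.ParseIP(name) != nil:
        return fmt.Errorf("%q: must not be in IP address format", name)
    }
    return nil
}

func main() {
    for _, n := range []string{"MyTrail", "my-_namespace", "192.168.5.4"} {
        fmt.Println(n, "->", validTrailName(n))
    }
}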
+ // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail + // + // If an empty list is specified, information for the trail in the current + // region is returned. + // + // If an empty list is specified and IncludeShadowTrails is false, then information + // for all trails in the current region is returned. + // + // If an empty list is specified and IncludeShadowTrails is null or true, + // then information for all trails in the current region and any associated + // shadow trails in other regions is returned. + // + // If one or more trail names are specified, information is returned only + // if the names match the names of trails belonging only to the current region. + // To return information about a trail in another region, you must specify its + // trail ARN. TrailNameList []*string `locationName:"trailNameList" type:"list"` } @@ -745,7 +1024,9 @@ type GetTrailStatusInput struct { // Specifies the name or the CloudTrail ARN of the trail for which you are requesting // status. To get the status of a shadow trail (a replication of the trail in - // another region), you must specify its ARN. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // another region), you must specify its ARN. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail Name *string `type:"string" required:"true"` } @@ -799,10 +1080,10 @@ type GetTrailStatusOutput struct { // topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) // in the Amazon S3 API Reference. // - // This error occurs only when there is a problem with the destination S3 bucket - // and will not occur for timeouts. To resolve the issue, create a new bucket - // and call UpdateTrail to specify the new bucket, or fix the existing objects - // so that CloudTrail can again write to the bucket. + // This error occurs only when there is a problem with the destination S3 + // bucket and will not occur for timeouts. To resolve the issue, create a new + // bucket and call UpdateTrail to specify the new bucket, or fix the existing + // objects so that CloudTrail can again write to the bucket. LatestDeliveryError *string `type:"string"` // Specifies the date and time that CloudTrail last delivered log files to an @@ -814,10 +1095,10 @@ type GetTrailStatusOutput struct { // the topic Error Responses (http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html) // in the Amazon S3 API Reference. // - // This error occurs only when there is a problem with the destination S3 bucket - // and will not occur for timeouts. To resolve the issue, create a new bucket - // and call UpdateTrail to specify the new bucket, or fix the existing objects - // so that CloudTrail can again write to the bucket. + // This error occurs only when there is a problem with the destination S3 + // bucket and will not occur for timeouts. To resolve the issue, create a new + // bucket and call UpdateTrail to specify the new bucket, or fix the existing + // objects so that CloudTrail can again write to the bucket. LatestDigestDeliveryError *string `type:"string"` // Specifies the date and time that CloudTrail last delivered a digest file @@ -901,7 +1182,7 @@ type ListPublicKeysOutput struct { // Contains an array of PublicKey objects. // - // The returned public keys may have validity time ranges that overlap. + // The returned public keys may have validity time ranges that overlap. 
PublicKeyList []*PublicKey `type:"list"` } @@ -923,7 +1204,9 @@ type ListTagsInput struct { NextToken *string `type:"string"` // Specifies a list of trail ARNs whose tags will be listed. The list has a - // limit of 20 ARNs. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // limit of 20 ARNs. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail ResourceIdList []*string `type:"list" required:"true"` } @@ -1133,7 +1416,9 @@ type RemoveTagsInput struct { _ struct{} `type:"structure"` // Specifies the ARN of the trail from which tags should be removed. The format - // of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail ResourceId *string `type:"string" required:"true"` // Specifies a list of tags to be removed. @@ -1243,7 +1528,9 @@ type StartLoggingInput struct { _ struct{} `type:"structure"` // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail - // logs AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // logs AWS API calls. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail Name *string `type:"string" required:"true"` } @@ -1292,7 +1579,9 @@ type StopLoggingInput struct { _ struct{} `type:"structure"` // Specifies the name or the CloudTrail ARN of the trail for which CloudTrail - // will stop logging AWS API calls. The format of a trail ARN is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // will stop logging AWS API calls. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail Name *string `type:"string" required:"true"` } @@ -1396,7 +1685,7 @@ type Trail struct { // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. // The value is a fully specified ARN to a KMS key in the format: // - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 KmsKeyId *string `type:"string"` // Specifies whether log file validation is enabled. @@ -1415,12 +1704,18 @@ type Trail struct { // maximum length is 200 characters. S3KeyPrefix *string `type:"string"` - // Name of the existing Amazon SNS topic that CloudTrail uses to notify the - // account owner when new CloudTrail log files have been delivered. The maximum - // length is 256 characters. - SnsTopicName *string `type:"string"` + // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + // when log files are delivered. The format of a topic ARN is: + // + // arn:aws:sns:us-east-1:123456789012:MyTopic + SnsTopicARN *string `type:"string"` - // The Amazon Resource Name of the trail. The TrailARN format is arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // This field is deprecated. Use SnsTopicARN. + SnsTopicName *string `deprecated:"true" type:"string"` + + // Specifies the ARN of the trail. The format of a trail ARN is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail TrailARN *string `type:"string"` } @@ -1449,7 +1744,7 @@ type UpdateTrailInput struct { // Specifies whether log file validation is enabled. The default is false. 
// - // When you disable log file integrity validation, the chain of digest files + // When you disable log file integrity validation, the chain of digest files // is broken after one hour. CloudTrail will not create digest files for log // files that were delivered during a period in which log file integrity validation // was disabled. For example, if you enable log file integrity validation at @@ -1477,20 +1772,33 @@ type UpdateTrailInput struct { // // Examples: // - // alias/MyAliasName arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // 12345678-1234-1234-1234-123456789012 + // alias/MyAliasName + // + // arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // 12345678-1234-1234-1234-123456789012 KmsKeyId *string `type:"string"` // Specifies the name of the trail or trail ARN. If Name is a trail name, the // string must meet the following requirements: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-) Start with a letter or number, and end with a letter or - // number Be between 3 and 128 characters Have no adjacent periods, underscores - // or dashes. Names like my-_namespace and my--namespace are invalid. Not be - // in IP address format (for example, 192.168.5.4) If Name is a trail ARN, - // it must be in the format arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-) + // + // Start with a letter or number, and end with a letter or number + // + // Be between 3 and 128 characters + // + // Have no adjacent periods, underscores or dashes. Names like my-_namespace + // and my--namespace are invalid. + // + // Not be in IP address format (for example, 192.168.5.4) + // + // If Name is a trail ARN, it must be in the format: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail Name *string `type:"string" required:"true"` // Specifies the name of the Amazon S3 bucket designated for publishing log @@ -1554,7 +1862,7 @@ type UpdateTrailOutput struct { // Specifies the KMS key ID that encrypts the logs delivered by CloudTrail. // The value is a fully specified ARN to a KMS key in the format: // - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 KmsKeyId *string `type:"string"` // Specifies whether log file integrity validation is enabled. @@ -1572,11 +1880,19 @@ type UpdateTrailOutput struct { // Your CloudTrail Log Files (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-find-log-files.html). S3KeyPrefix *string `type:"string"` - // Specifies the name of the Amazon SNS topic defined for notification of log - // file delivery. - SnsTopicName *string `type:"string"` + // Specifies the ARN of the Amazon SNS topic that CloudTrail uses to send notifications + // when log files are delivered. The format of a topic ARN is: + // + // arn:aws:sns:us-east-1:123456789012:MyTopic + SnsTopicARN *string `type:"string"` - // Specifies the ARN of the trail that was updated. + // This field is deprecated. Use SnsTopicARN. + SnsTopicName *string `deprecated:"true" type:"string"` + + // Specifies the ARN of the trail that was updated. 
The format of a trail ARN + // is: + // + // arn:aws:cloudtrail:us-east-1:123456789012:trail/MyTrail TrailARN *string `type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go index b3e217929..621e16698 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudtrail/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // This is the CloudTrail API Reference. It provides descriptions of actions, @@ -20,16 +20,17 @@ import ( // IP address, the request parameters, and the response elements returned by // the service. // -// As an alternative to using the API, you can use one of the AWS SDKs, which -// consist of libraries and sample code for various programming languages and -// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient -// way to create programmatic access to AWSCloudTrail. For example, the SDKs -// take care of cryptographically signing requests, managing errors, and retrying +// As an alternative to the API, you can use one of the AWS SDKs, which consist +// of libraries and sample code for various programming languages and platforms +// (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient way +// to create programmatic access to AWSCloudTrail. For example, the SDKs take +// care of cryptographically signing requests, managing errors, and retrying // requests automatically. For information about the AWS SDKs, including how // to download and install them, see the Tools for Amazon Web Services page -// (http://aws.amazon.com/tools/). See the CloudTrail User Guide for information -// about the data that is included with each AWS API call listed in the log -// files. +// (http://aws.amazon.com/tools/). +// +// See the CloudTrail User Guide for information about the data that is included +// with each AWS API call listed in the log files. //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. type CloudTrail struct { @@ -78,7 +79,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index e36887cc1..e23db4766 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -15,7 +15,28 @@ import ( const opDeleteAlarms = "DeleteAlarms" -// DeleteAlarmsRequest generates a request for the DeleteAlarms operation. +// DeleteAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlarms operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
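The cloudtrail/service.go hunk above moves signing from an anonymous Sign.PushBack(v4.Sign) to the named handler Sign.PushBackNamed(v4.SignRequestHandler). One practical consequence, sketched below under assumptions (the handler name and the log statement are invented), is that application code can register its own request.NamedHandler on the same handler lists.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudtrail"
)

func main() {
    svc := cloudtrail.New(session.New())

    // Attach an additional named handler after the SDK's v4 signer.
    svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
        Name: "example.AfterSignLogger", // hypothetical handler name
        Fn: func(r *request.Request) {
            log.Printf("signed a %s request", r.Operation.Name)
        },
    })

    // Requests created from svc after this point run the extra handler when signed.
}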
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlarms method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAlarmsRequest method. +// req, resp := client.DeleteAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) DeleteAlarmsRequest(input *DeleteAlarmsInput) (req *request.Request, output *DeleteAlarmsOutput) { op := &request.Operation{ Name: opDeleteAlarms, @@ -44,7 +65,28 @@ func (c *CloudWatch) DeleteAlarms(input *DeleteAlarmsInput) (*DeleteAlarmsOutput const opDescribeAlarmHistory = "DescribeAlarmHistory" -// DescribeAlarmHistoryRequest generates a request for the DescribeAlarmHistory operation. +// DescribeAlarmHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarmHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmHistoryRequest method. +// req, resp := client.DescribeAlarmHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) DescribeAlarmHistoryRequest(input *DescribeAlarmHistoryInput) (req *request.Request, output *DescribeAlarmHistoryOutput) { op := &request.Operation{ Name: opDescribeAlarmHistory, @@ -80,6 +122,23 @@ func (c *CloudWatch) DescribeAlarmHistory(input *DescribeAlarmHistoryInput) (*De return out, err } +// DescribeAlarmHistoryPages iterates over the pages of a DescribeAlarmHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarmHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarmHistory operation. 
+// pageNum := 0 +// err := client.DescribeAlarmHistoryPages(params, +// func(page *DescribeAlarmHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, fn func(p *DescribeAlarmHistoryOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeAlarmHistoryRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -90,7 +149,28 @@ func (c *CloudWatch) DescribeAlarmHistoryPages(input *DescribeAlarmHistoryInput, const opDescribeAlarms = "DescribeAlarms" -// DescribeAlarmsRequest generates a request for the DescribeAlarms operation. +// DescribeAlarmsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarms operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarms method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmsRequest method. +// req, resp := client.DescribeAlarmsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) DescribeAlarmsRequest(input *DescribeAlarmsInput) (req *request.Request, output *DescribeAlarmsOutput) { op := &request.Operation{ Name: opDescribeAlarms, @@ -123,6 +203,23 @@ func (c *CloudWatch) DescribeAlarms(input *DescribeAlarmsInput) (*DescribeAlarms return out, err } +// DescribeAlarmsPages iterates over the pages of a DescribeAlarms operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeAlarms method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeAlarms operation. +// pageNum := 0 +// err := client.DescribeAlarmsPages(params, +// func(page *DescribeAlarmsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p *DescribeAlarmsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeAlarmsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -133,7 +230,28 @@ func (c *CloudWatch) DescribeAlarmsPages(input *DescribeAlarmsInput, fn func(p * const opDescribeAlarmsForMetric = "DescribeAlarmsForMetric" -// DescribeAlarmsForMetricRequest generates a request for the DescribeAlarmsForMetric operation. +// DescribeAlarmsForMetricRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAlarmsForMetric operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
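Beyond the page-counting example embedded in the generated comment above, a common use of the new Pages helpers is simply accumulating every item across pages. A sketch follows; the alarm name and session setup are assumptions.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
    svc := cloudwatch.New(session.New())

    var items []*cloudwatch.AlarmHistoryItem
    err := svc.DescribeAlarmHistoryPages(
        &cloudwatch.DescribeAlarmHistoryInput{AlarmName: aws.String("my-alarm")}, // hypothetical alarm
        func(page *cloudwatch.DescribeAlarmHistoryOutput, lastPage bool) bool {
            items = append(items, page.AlarmHistoryItems...)
            return !lastPage // keep paging until the final page
        })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("history items:", len(items))
}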
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAlarmsForMetric method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAlarmsForMetricRequest method. +// req, resp := client.DescribeAlarmsForMetricRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) DescribeAlarmsForMetricRequest(input *DescribeAlarmsForMetricInput) (req *request.Request, output *DescribeAlarmsForMetricOutput) { op := &request.Operation{ Name: opDescribeAlarmsForMetric, @@ -161,7 +279,28 @@ func (c *CloudWatch) DescribeAlarmsForMetric(input *DescribeAlarmsForMetricInput const opDisableAlarmActions = "DisableAlarmActions" -// DisableAlarmActionsRequest generates a request for the DisableAlarmActions operation. +// DisableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the DisableAlarmActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableAlarmActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableAlarmActionsRequest method. +// req, resp := client.DisableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) DisableAlarmActionsRequest(input *DisableAlarmActionsInput) (req *request.Request, output *DisableAlarmActionsOutput) { op := &request.Operation{ Name: opDisableAlarmActions, @@ -191,7 +330,28 @@ func (c *CloudWatch) DisableAlarmActions(input *DisableAlarmActionsInput) (*Disa const opEnableAlarmActions = "EnableAlarmActions" -// EnableAlarmActionsRequest generates a request for the EnableAlarmActions operation. +// EnableAlarmActionsRequest generates a "aws/request.Request" representing the +// client's request for the EnableAlarmActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableAlarmActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableAlarmActionsRequest method. 
+// req, resp := client.EnableAlarmActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) EnableAlarmActionsRequest(input *EnableAlarmActionsInput) (req *request.Request, output *EnableAlarmActionsOutput) { op := &request.Operation{ Name: opEnableAlarmActions, @@ -220,7 +380,28 @@ func (c *CloudWatch) EnableAlarmActions(input *EnableAlarmActionsInput) (*Enable const opGetMetricStatistics = "GetMetricStatistics" -// GetMetricStatisticsRequest generates a request for the GetMetricStatistics operation. +// GetMetricStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetMetricStatistics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetMetricStatistics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetMetricStatisticsRequest method. +// req, resp := client.GetMetricStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) (req *request.Request, output *GetMetricStatisticsOutput) { op := &request.Operation{ Name: opGetMetricStatistics, @@ -273,7 +454,28 @@ func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetM const opListMetrics = "ListMetrics" -// ListMetricsRequest generates a request for the ListMetrics operation. +// ListMetricsRequest generates a "aws/request.Request" representing the +// client's request for the ListMetrics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMetrics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMetricsRequest method. +// req, resp := client.ListMetricsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.Request, output *ListMetricsOutput) { op := &request.Operation{ Name: opListMetrics, @@ -312,6 +514,23 @@ func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, e return out, err } +// ListMetricsPages iterates over the pages of a ListMetrics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMetrics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListMetrics operation. +// pageNum := 0 +// err := client.ListMetricsPages(params, +// func(page *ListMetricsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMetricsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListMetricsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -322,7 +541,28 @@ func (c *CloudWatch) ListMetricsPages(input *ListMetricsInput, fn func(p *ListMe const opPutMetricAlarm = "PutMetricAlarm" -// PutMetricAlarmRequest generates a request for the PutMetricAlarm operation. +// PutMetricAlarmRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricAlarm operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMetricAlarm method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricAlarmRequest method. +// req, resp := client.PutMetricAlarmRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *request.Request, output *PutMetricAlarmOutput) { op := &request.Operation{ Name: opPutMetricAlarm, @@ -381,7 +621,28 @@ func (c *CloudWatch) PutMetricAlarm(input *PutMetricAlarmInput) (*PutMetricAlarm const opPutMetricData = "PutMetricData" -// PutMetricDataRequest generates a request for the PutMetricData operation. +// PutMetricDataRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricData operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMetricData method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricDataRequest method. +// req, resp := client.PutMetricDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *request.Request, output *PutMetricDataOutput) { op := &request.Operation{ Name: opPutMetricData, @@ -425,7 +686,28 @@ func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOut const opSetAlarmState = "SetAlarmState" -// SetAlarmStateRequest generates a request for the SetAlarmState operation. 
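The PutMetricDataRequest hunk above covers publishing custom metric data points. A hedged usage sketch is below; the namespace, metric name, unit string, and value are all illustrative assumptions.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
    svc := cloudwatch.New(session.New())

    // Publish a single custom data point.
    _, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
        Namespace: aws.String("Example/App"), // assumed namespace
        MetricData: []*cloudwatch.MetricDatum{{
            MetricName: aws.String("PageLoadTime"), // assumed metric name
            Unit:       aws.String("Milliseconds"),
            Value:      aws.Float64(123),
        }},
    })
    if err != nil {
        log.Fatal(err)
    }
}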
+// SetAlarmStateRequest generates a "aws/request.Request" representing the +// client's request for the SetAlarmState operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetAlarmState method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetAlarmStateRequest method. +// req, resp := client.SetAlarmStateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *request.Request, output *SetAlarmStateOutput) { op := &request.Operation{ Name: opSetAlarmState, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go index 342498f0e..8b707e5f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the @@ -72,7 +72,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go new file mode 100644 index 000000000..c1ca3f334 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/waiters.go @@ -0,0 +1,30 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package cloudwatch + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CloudWatch) WaitUntilAlarmExists(input *DescribeAlarmsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeAlarms", + Delay: 5, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "length(MetricAlarms[]) > `0`", + Expected: true, + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go index bd35b89e8..2ae268d88 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/api.go @@ -15,7 +15,28 @@ import ( const opDeleteRule = "DeleteRule" -// DeleteRuleRequest generates a request for the DeleteRule operation. 
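The new cloudwatch/waiters.go file above adds WaitUntilAlarmExists, which polls DescribeAlarms (up to 40 attempts, 5 seconds apart, per the waiter config) until at least one matching alarm is returned. A usage sketch follows; the alarm name is an assumption.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
    svc := cloudwatch.New(session.New())

    // Block until the alarm is visible via DescribeAlarms, or the waiter gives up.
    err := svc.WaitUntilAlarmExists(&cloudwatch.DescribeAlarmsInput{
        AlarmNames: []*string{aws.String("my-alarm")}, // hypothetical alarm name
    })
    if err != nil {
        log.Fatal(err)
    }
    log.Println("alarm exists")
}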
+// DeleteRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRuleRequest method. +// req, resp := client.DeleteRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) DeleteRuleRequest(input *DeleteRuleInput) (req *request.Request, output *DeleteRuleOutput) { op := &request.Operation{ Name: opDeleteRule, @@ -49,7 +70,28 @@ func (c *CloudWatchEvents) DeleteRule(input *DeleteRuleInput) (*DeleteRuleOutput const opDescribeRule = "DescribeRule" -// DescribeRuleRequest generates a request for the DescribeRule operation. +// DescribeRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRuleRequest method. +// req, resp := client.DescribeRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) DescribeRuleRequest(input *DescribeRuleInput) (req *request.Request, output *DescribeRuleOutput) { op := &request.Operation{ Name: opDescribeRule, @@ -76,7 +118,28 @@ func (c *CloudWatchEvents) DescribeRule(input *DescribeRuleInput) (*DescribeRule const opDisableRule = "DisableRule" -// DisableRuleRequest generates a request for the DisableRule operation. +// DisableRuleRequest generates a "aws/request.Request" representing the +// client's request for the DisableRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableRuleRequest method. 
+// req, resp := client.DisableRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) DisableRuleRequest(input *DisableRuleInput) (req *request.Request, output *DisableRuleOutput) { op := &request.Operation{ Name: opDisableRule, @@ -110,7 +173,28 @@ func (c *CloudWatchEvents) DisableRule(input *DisableRuleInput) (*DisableRuleOut const opEnableRule = "EnableRule" -// EnableRuleRequest generates a request for the EnableRule operation. +// EnableRuleRequest generates a "aws/request.Request" representing the +// client's request for the EnableRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableRuleRequest method. +// req, resp := client.EnableRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) EnableRuleRequest(input *EnableRuleInput) (req *request.Request, output *EnableRuleOutput) { op := &request.Operation{ Name: opEnableRule, @@ -143,7 +227,28 @@ func (c *CloudWatchEvents) EnableRule(input *EnableRuleInput) (*EnableRuleOutput const opListRuleNamesByTarget = "ListRuleNamesByTarget" -// ListRuleNamesByTargetRequest generates a request for the ListRuleNamesByTarget operation. +// ListRuleNamesByTargetRequest generates a "aws/request.Request" representing the +// client's request for the ListRuleNamesByTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRuleNamesByTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRuleNamesByTargetRequest method. +// req, resp := client.ListRuleNamesByTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) ListRuleNamesByTargetRequest(input *ListRuleNamesByTargetInput) (req *request.Request, output *ListRuleNamesByTargetOutput) { op := &request.Operation{ Name: opListRuleNamesByTarget, @@ -175,7 +280,28 @@ func (c *CloudWatchEvents) ListRuleNamesByTarget(input *ListRuleNamesByTargetInp const opListRules = "ListRules" -// ListRulesRequest generates a request for the ListRules operation. +// ListRulesRequest generates a "aws/request.Request" representing the +// client's request for the ListRules operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRules method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRulesRequest method. +// req, resp := client.ListRulesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) ListRulesRequest(input *ListRulesInput) (req *request.Request, output *ListRulesOutput) { op := &request.Operation{ Name: opListRules, @@ -206,7 +332,28 @@ func (c *CloudWatchEvents) ListRules(input *ListRulesInput) (*ListRulesOutput, e const opListTargetsByRule = "ListTargetsByRule" -// ListTargetsByRuleRequest generates a request for the ListTargetsByRule operation. +// ListTargetsByRuleRequest generates a "aws/request.Request" representing the +// client's request for the ListTargetsByRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTargetsByRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTargetsByRuleRequest method. +// req, resp := client.ListTargetsByRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) ListTargetsByRuleRequest(input *ListTargetsByRuleInput) (req *request.Request, output *ListTargetsByRuleOutput) { op := &request.Operation{ Name: opListTargetsByRule, @@ -233,7 +380,28 @@ func (c *CloudWatchEvents) ListTargetsByRule(input *ListTargetsByRuleInput) (*Li const opPutEvents = "PutEvents" -// PutEventsRequest generates a request for the PutEvents operation. +// PutEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutEventsRequest method. 
+// req, resp := client.PutEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) PutEventsRequest(input *PutEventsInput) (req *request.Request, output *PutEventsOutput) { op := &request.Operation{ Name: opPutEvents, @@ -261,7 +429,28 @@ func (c *CloudWatchEvents) PutEvents(input *PutEventsInput) (*PutEventsOutput, e const opPutRule = "PutRule" -// PutRuleRequest generates a request for the PutRule operation. +// PutRuleRequest generates a "aws/request.Request" representing the +// client's request for the PutRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRuleRequest method. +// req, resp := client.PutRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) PutRuleRequest(input *PutRuleInput) (req *request.Request, output *PutRuleOutput) { op := &request.Operation{ Name: opPutRule, @@ -305,7 +494,28 @@ func (c *CloudWatchEvents) PutRule(input *PutRuleInput) (*PutRuleOutput, error) const opPutTargets = "PutTargets" -// PutTargetsRequest generates a request for the PutTargets operation. +// PutTargetsRequest generates a "aws/request.Request" representing the +// client's request for the PutTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutTargetsRequest method. +// req, resp := client.PutTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) PutTargetsRequest(input *PutTargetsInput) (req *request.Request, output *PutTargetsOutput) { op := &request.Operation{ Name: opPutTargets, @@ -355,7 +565,28 @@ func (c *CloudWatchEvents) PutTargets(input *PutTargetsInput) (*PutTargetsOutput const opRemoveTargets = "RemoveTargets" -// RemoveTargetsRequest generates a request for the RemoveTargets operation. +// RemoveTargetsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
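// When only the service response is needed, the comments above recommend calling
// the operation method directly instead of constructing the request by hand. A
// minimal sketch for PutEvents follows; the source, detail type, and detail
// payload are made-up values used purely for illustration.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// PutEvents wraps PutEventsRequest followed by Send and returns the output directly.
	resp, err := svc.PutEvents(&cloudwatchevents.PutEventsInput{
		Entries: []*cloudwatchevents.PutEventsRequestEntry{
			{
				Source:     aws.String("com.example.app"),
				DetailType: aws.String("example-event"),
				Detail:     aws.String(`{"state":"ok"}`),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.Int64Value(resp.FailedEntryCount))
}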
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTargetsRequest method. +// req, resp := client.RemoveTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) RemoveTargetsRequest(input *RemoveTargetsInput) (req *request.Request, output *RemoveTargetsOutput) { op := &request.Operation{ Name: opRemoveTargets, @@ -387,7 +618,28 @@ func (c *CloudWatchEvents) RemoveTargets(input *RemoveTargetsInput) (*RemoveTarg const opTestEventPattern = "TestEventPattern" -// TestEventPatternRequest generates a request for the TestEventPattern operation. +// TestEventPatternRequest generates a "aws/request.Request" representing the +// client's request for the TestEventPattern operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestEventPattern method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestEventPatternRequest method. 
+// req, resp := client.TestEventPatternRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchEvents) TestEventPatternRequest(input *TestEventPatternInput) (req *request.Request, output *TestEventPatternOutput) { op := &request.Operation{ Name: opTestEventPattern, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go index 14f99f9b4..0501a80d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchevents/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon CloudWatch Events helps you to respond to state changes in your AWS @@ -73,7 +73,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go index d5be83323..d2f65e266 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -14,7 +14,28 @@ import ( const opCancelExportTask = "CancelExportTask" -// CancelExportTaskRequest generates a request for the CancelExportTask operation. +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { op := &request.Operation{ Name: opCancelExportTask, @@ -43,7 +64,28 @@ func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*Cancel const opCreateExportTask = "CreateExportTask" -// CreateExportTaskRequest generates a request for the CreateExportTask operation. +// CreateExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateExportTask operation. 
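// The client constructor above now registers the SigV4 signer as a named handler
// (v4.SignRequestHandler) rather than a bare function, which allows it to be
// replaced or removed by name. Application code can hook the same handler lists
// in the same way; a rough sketch, where the handler name and log line are
// illustrative assumptions of the example.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchevents"
)

func main() {
	svc := cloudwatchevents.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Runs after the SigV4 signing handler for every request made by this client.
	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
		Name: "example.LogSignedRequests",
		Fn: func(r *request.Request) {
			log.Printf("signed %s.%s", r.ClientInfo.ServiceName, r.Operation.Name)
		},
	})

	if _, err := svc.ListRules(&cloudwatchevents.ListRulesInput{}); err != nil {
		log.Fatal(err)
	}
}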
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateExportTaskRequest method. +// req, resp := client.CreateExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { op := &request.Operation{ Name: opCreateExportTask, @@ -82,7 +124,28 @@ func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*Create const opCreateLogGroup = "CreateLogGroup" -// CreateLogGroupRequest generates a request for the CreateLogGroup operation. +// CreateLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLogGroupRequest method. +// req, resp := client.CreateLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { op := &request.Operation{ Name: opCreateLogGroup, @@ -117,7 +180,28 @@ func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogG const opCreateLogStream = "CreateLogStream" -// CreateLogStreamRequest generates a request for the CreateLogStream operation. +// CreateLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateLogStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLogStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLogStreamRequest method. 
+// req, resp := client.CreateLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { op := &request.Operation{ Name: opCreateLogStream, @@ -152,7 +236,28 @@ func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLo const opDeleteDestination = "DeleteDestination" -// DeleteDestinationRequest generates a request for the DeleteDestination operation. +// DeleteDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDestination operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDestination method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDestinationRequest method. +// req, resp := client.DeleteDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { op := &request.Operation{ Name: opDeleteDestination, @@ -183,7 +288,28 @@ func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*Dele const opDeleteLogGroup = "DeleteLogGroup" -// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. +// DeleteLogGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogGroupRequest method. +// req, resp := client.DeleteLogGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { op := &request.Operation{ Name: opDeleteLogGroup, @@ -213,7 +339,28 @@ func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogG const opDeleteLogStream = "DeleteLogStream" -// DeleteLogStreamRequest generates a request for the DeleteLogStream operation. +// DeleteLogStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLogStream operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLogStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLogStreamRequest method. +// req, resp := client.DeleteLogStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { op := &request.Operation{ Name: opDeleteLogStream, @@ -243,7 +390,28 @@ func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLo const opDeleteMetricFilter = "DeleteMetricFilter" -// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. +// DeleteMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMetricFilterRequest method. +// req, resp := client.DeleteMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { op := &request.Operation{ Name: opDeleteMetricFilter, @@ -272,7 +440,28 @@ func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*De const opDeleteRetentionPolicy = "DeleteRetentionPolicy" -// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. +// DeleteRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRetentionPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRetentionPolicyRequest method. 
+// req, resp := client.DeleteRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { op := &request.Operation{ Name: opDeleteRetentionPolicy, @@ -302,7 +491,28 @@ func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" -// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. +// DeleteSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSubscriptionFilterRequest method. +// req, resp := client.DeleteSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { op := &request.Operation{ Name: opDeleteSubscriptionFilter, @@ -331,7 +541,28 @@ func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilte const opDescribeDestinations = "DescribeDestinations" -// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. +// DescribeDestinationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDestinations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDestinations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDestinationsRequest method. 
+// req, resp := client.DescribeDestinationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { op := &request.Operation{ Name: opDescribeDestinations, @@ -369,6 +600,23 @@ func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) return out, err } +// DescribeDestinationsPages iterates over the pages of a DescribeDestinations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDestinations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDestinations operation. +// pageNum := 0 +// err := client.DescribeDestinationsPages(params, +// func(page *DescribeDestinationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDestinationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -379,7 +627,28 @@ func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsIn const opDescribeExportTasks = "DescribeExportTasks" -// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { op := &request.Operation{ Name: opDescribeExportTasks, @@ -413,7 +682,28 @@ func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (* const opDescribeLogGroups = "DescribeLogGroups" -// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. +// DescribeLogGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogGroupsRequest method. +// req, resp := client.DescribeLogGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { op := &request.Operation{ Name: opDescribeLogGroups, @@ -451,6 +741,23 @@ func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*Desc return out, err } +// DescribeLogGroupsPages iterates over the pages of a DescribeLogGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogGroups operation. +// pageNum := 0 +// err := client.DescribeLogGroupsPages(params, +// func(page *DescribeLogGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLogGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -461,7 +768,28 @@ func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, f const opDescribeLogStreams = "DescribeLogStreams" -// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. +// DescribeLogStreamsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLogStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLogStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLogStreamsRequest method. 
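// The new Pages helpers added above drive the request/Send loop internally and
// hand each page of results to a callback; returning false from the callback
// stops the iteration early. A minimal sketch for DescribeLogGroups pagination,
// with the region being an assumption of the example.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func main() {
	svc := cloudwatchlogs.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	var names []string
	err := svc.DescribeLogGroupsPages(&cloudwatchlogs.DescribeLogGroupsInput{},
		func(page *cloudwatchlogs.DescribeLogGroupsOutput, lastPage bool) bool {
			for _, lg := range page.LogGroups {
				names = append(names, aws.StringValue(lg.LogGroupName))
			}
			return true // keep paging until lastPage is reached
		})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(names)
}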
+// req, resp := client.DescribeLogStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { op := &request.Operation{ Name: opDescribeLogStreams, @@ -500,6 +828,23 @@ func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*De return out, err } +// DescribeLogStreamsPages iterates over the pages of a DescribeLogStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLogStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLogStreams operation. +// pageNum := 0 +// err := client.DescribeLogStreamsPages(params, +// func(page *DescribeLogStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLogStreamsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -510,7 +855,28 @@ func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, const opDescribeMetricFilters = "DescribeMetricFilters" -// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. +// DescribeMetricFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMetricFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMetricFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMetricFiltersRequest method. +// req, resp := client.DescribeMetricFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { op := &request.Operation{ Name: opDescribeMetricFilters, @@ -547,6 +913,23 @@ func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput return out, err } +// DescribeMetricFiltersPages iterates over the pages of a DescribeMetricFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeMetricFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeMetricFilters operation. 
+// pageNum := 0 +// err := client.DescribeMetricFiltersPages(params, +// func(page *DescribeMetricFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeMetricFiltersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -557,7 +940,28 @@ func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFilters const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" -// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. +// DescribeSubscriptionFiltersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubscriptionFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSubscriptionFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSubscriptionFiltersRequest method. +// req, resp := client.DescribeSubscriptionFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { op := &request.Operation{ Name: opDescribeSubscriptionFilters, @@ -595,6 +999,23 @@ func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscription return out, err } +// DescribeSubscriptionFiltersPages iterates over the pages of a DescribeSubscriptionFilters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSubscriptionFilters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSubscriptionFilters operation. +// pageNum := 0 +// err := client.DescribeSubscriptionFiltersPages(params, +// func(page *DescribeSubscriptionFiltersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeSubscriptionFiltersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -605,7 +1026,28 @@ func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscri const opFilterLogEvents = "FilterLogEvents" -// FilterLogEventsRequest generates a request for the FilterLogEvents operation. 
+// FilterLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the FilterLogEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FilterLogEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FilterLogEventsRequest method. +// req, resp := client.FilterLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { op := &request.Operation{ Name: opFilterLogEvents, @@ -649,6 +1091,23 @@ func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLo return out, err } +// FilterLogEventsPages iterates over the pages of a FilterLogEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See FilterLogEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a FilterLogEvents operation. +// pageNum := 0 +// err := client.FilterLogEventsPages(params, +// func(page *FilterLogEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.FilterLogEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -659,7 +1118,28 @@ func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn fu const opGetLogEvents = "GetLogEvents" -// GetLogEventsRequest generates a request for the GetLogEvents operation. +// GetLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the GetLogEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetLogEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetLogEventsRequest method. 
+// req, resp := client.GetLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { op := &request.Operation{ Name: opGetLogEvents, @@ -699,6 +1179,23 @@ func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOu return out, err } +// GetLogEventsPages iterates over the pages of a GetLogEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetLogEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetLogEvents operation. +// pageNum := 0 +// err := client.GetLogEventsPages(params, +// func(page *GetLogEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetLogEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -709,7 +1206,28 @@ func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p * const opPutDestination = "PutDestination" -// PutDestinationRequest generates a request for the PutDestination operation. +// PutDestinationRequest generates a "aws/request.Request" representing the +// client's request for the PutDestination operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutDestination method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutDestinationRequest method. +// req, resp := client.PutDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { op := &request.Operation{ Name: opPutDestination, @@ -746,7 +1264,28 @@ func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestina const opPutDestinationPolicy = "PutDestinationPolicy" -// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. +// PutDestinationPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutDestinationPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the PutDestinationPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutDestinationPolicyRequest method. +// req, resp := client.PutDestinationPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { op := &request.Operation{ Name: opPutDestinationPolicy, @@ -778,7 +1317,28 @@ func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) const opPutLogEvents = "PutLogEvents" -// PutLogEventsRequest generates a request for the PutLogEvents operation. +// PutLogEventsRequest generates a "aws/request.Request" representing the +// client's request for the PutLogEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutLogEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutLogEventsRequest method. +// req, resp := client.PutLogEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) { op := &request.Operation{ Name: opPutLogEvents, @@ -819,7 +1379,28 @@ func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOu const opPutMetricFilter = "PutMetricFilter" -// PutMetricFilterRequest generates a request for the PutMetricFilter operation. +// PutMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutMetricFilterRequest method. 
+// req, resp := client.PutMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { op := &request.Operation{ Name: opPutMetricFilter, @@ -853,7 +1434,28 @@ func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetri const opPutRetentionPolicy = "PutRetentionPolicy" -// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation. +// PutRetentionPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRetentionPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRetentionPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRetentionPolicyRequest method. +// req, resp := client.PutRetentionPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { op := &request.Operation{ Name: opPutRetentionPolicy, @@ -884,7 +1486,28 @@ func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*Pu const opPutSubscriptionFilter = "PutSubscriptionFilter" -// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation. +// PutSubscriptionFilterRequest generates a "aws/request.Request" representing the +// client's request for the PutSubscriptionFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutSubscriptionFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutSubscriptionFilterRequest method. +// req, resp := client.PutSubscriptionFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { op := &request.Operation{ Name: opPutSubscriptionFilter, @@ -926,7 +1549,28 @@ func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput const opTestMetricFilter = "TestMetricFilter" -// TestMetricFilterRequest generates a request for the TestMetricFilter operation. 
+// TestMetricFilterRequest generates a "aws/request.Request" representing the +// client's request for the TestMetricFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestMetricFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestMetricFilterRequest method. +// req, resp := client.TestMetricFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { op := &request.Operation{ Name: opTestMetricFilter, diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index e5a47270c..064110304 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // You can use Amazon CloudWatch Logs to monitor, store, and access your log @@ -88,7 +88,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go index 88d3a006c..e4e8c8cd0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/api.go @@ -14,7 +14,28 @@ import ( const opBatchGetRepositories = "BatchGetRepositories" -// BatchGetRepositoriesRequest generates a request for the BatchGetRepositories operation. +// BatchGetRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetRepositories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetRepositoriesRequest method. 
+// req, resp := client.BatchGetRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) BatchGetRepositoriesRequest(input *BatchGetRepositoriesInput) (req *request.Request, output *BatchGetRepositoriesOutput) { op := &request.Operation{ Name: opBatchGetRepositories, @@ -47,7 +68,28 @@ func (c *CodeCommit) BatchGetRepositories(input *BatchGetRepositoriesInput) (*Ba const opCreateBranch = "CreateBranch" -// CreateBranchRequest generates a request for the CreateBranch operation. +// CreateBranchRequest generates a "aws/request.Request" representing the +// client's request for the CreateBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBranchRequest method. +// req, resp := client.CreateBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) CreateBranchRequest(input *CreateBranchInput) (req *request.Request, output *CreateBranchOutput) { op := &request.Operation{ Name: opCreateBranch, @@ -79,7 +121,28 @@ func (c *CodeCommit) CreateBranch(input *CreateBranchInput) (*CreateBranchOutput const opCreateRepository = "CreateRepository" -// CreateRepositoryRequest generates a request for the CreateRepository operation. +// CreateRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRepositoryRequest method. +// req, resp := client.CreateRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { op := &request.Operation{ Name: opCreateRepository, @@ -106,7 +169,28 @@ func (c *CodeCommit) CreateRepository(input *CreateRepositoryInput) (*CreateRepo const opDeleteRepository = "DeleteRepository" -// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +// DeleteRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryRequest method. +// req, resp := client.DeleteRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { op := &request.Operation{ Name: opDeleteRepository, @@ -138,7 +222,28 @@ func (c *CodeCommit) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepo const opGetBranch = "GetBranch" -// GetBranchRequest generates a request for the GetBranch operation. +// GetBranchRequest generates a "aws/request.Request" representing the +// client's request for the GetBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBranchRequest method. +// req, resp := client.GetBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) GetBranchRequest(input *GetBranchInput) (req *request.Request, output *GetBranchOutput) { op := &request.Operation{ Name: opGetBranch, @@ -166,7 +271,28 @@ func (c *CodeCommit) GetBranch(input *GetBranchInput) (*GetBranchOutput, error) const opGetCommit = "GetCommit" -// GetCommitRequest generates a request for the GetCommit operation. +// GetCommitRequest generates a "aws/request.Request" representing the +// client's request for the GetCommit operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCommit method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCommitRequest method. 
+// req, resp := client.GetCommitRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) GetCommitRequest(input *GetCommitInput) (req *request.Request, output *GetCommitOutput) { op := &request.Operation{ Name: opGetCommit, @@ -194,7 +320,28 @@ func (c *CodeCommit) GetCommit(input *GetCommitInput) (*GetCommitOutput, error) const opGetRepository = "GetRepository" -// GetRepositoryRequest generates a request for the GetRepository operation. +// GetRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the GetRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryRequest method. +// req, resp := client.GetRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) GetRepositoryRequest(input *GetRepositoryInput) (req *request.Request, output *GetRepositoryOutput) { op := &request.Operation{ Name: opGetRepository, @@ -227,7 +374,28 @@ func (c *CodeCommit) GetRepository(input *GetRepositoryInput) (*GetRepositoryOut const opGetRepositoryTriggers = "GetRepositoryTriggers" -// GetRepositoryTriggersRequest generates a request for the GetRepositoryTriggers operation. +// GetRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryTriggersRequest method. +// req, resp := client.GetRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) GetRepositoryTriggersRequest(input *GetRepositoryTriggersInput) (req *request.Request, output *GetRepositoryTriggersOutput) { op := &request.Operation{ Name: opGetRepositoryTriggers, @@ -254,7 +422,28 @@ func (c *CodeCommit) GetRepositoryTriggers(input *GetRepositoryTriggersInput) (* const opListBranches = "ListBranches" -// ListBranchesRequest generates a request for the ListBranches operation. +// ListBranchesRequest generates a "aws/request.Request" representing the +// client's request for the ListBranches operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBranches method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBranchesRequest method. +// req, resp := client.ListBranchesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) ListBranchesRequest(input *ListBranchesInput) (req *request.Request, output *ListBranchesOutput) { op := &request.Operation{ Name: opListBranches, @@ -285,6 +474,23 @@ func (c *CodeCommit) ListBranches(input *ListBranchesInput) (*ListBranchesOutput return out, err } +// ListBranchesPages iterates over the pages of a ListBranches operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBranches method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBranches operation. +// pageNum := 0 +// err := client.ListBranchesPages(params, +// func(page *ListBranchesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeCommit) ListBranchesPages(input *ListBranchesInput, fn func(p *ListBranchesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListBranchesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -295,7 +501,28 @@ func (c *CodeCommit) ListBranchesPages(input *ListBranchesInput, fn func(p *List const opListRepositories = "ListRepositories" -// ListRepositoriesRequest generates a request for the ListRepositories operation. +// ListRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the ListRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRepositories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRepositoriesRequest method. +// req, resp := client.ListRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) ListRepositoriesRequest(input *ListRepositoriesInput) (req *request.Request, output *ListRepositoriesOutput) { op := &request.Operation{ Name: opListRepositories, @@ -326,6 +553,23 @@ func (c *CodeCommit) ListRepositories(input *ListRepositoriesInput) (*ListReposi return out, err } +// ListRepositoriesPages iterates over the pages of a ListRepositories operation, +// calling the "fn" function with the response data for each page. 
To stop +// iterating, return false from the fn function. +// +// See ListRepositories method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRepositories operation. +// pageNum := 0 +// err := client.ListRepositoriesPages(params, +// func(page *ListRepositoriesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeCommit) ListRepositoriesPages(input *ListRepositoriesInput, fn func(p *ListRepositoriesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListRepositoriesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -336,7 +580,28 @@ func (c *CodeCommit) ListRepositoriesPages(input *ListRepositoriesInput, fn func const opPutRepositoryTriggers = "PutRepositoryTriggers" -// PutRepositoryTriggersRequest generates a request for the PutRepositoryTriggers operation. +// PutRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the PutRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRepositoryTriggersRequest method. +// req, resp := client.PutRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) PutRepositoryTriggersRequest(input *PutRepositoryTriggersInput) (req *request.Request, output *PutRepositoryTriggersOutput) { op := &request.Operation{ Name: opPutRepositoryTriggers, @@ -364,7 +629,28 @@ func (c *CodeCommit) PutRepositoryTriggers(input *PutRepositoryTriggersInput) (* const opTestRepositoryTriggers = "TestRepositoryTriggers" -// TestRepositoryTriggersRequest generates a request for the TestRepositoryTriggers operation. +// TestRepositoryTriggersRequest generates a "aws/request.Request" representing the +// client's request for the TestRepositoryTriggers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestRepositoryTriggers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestRepositoryTriggersRequest method. 
+// req, resp := client.TestRepositoryTriggersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) TestRepositoryTriggersRequest(input *TestRepositoryTriggersInput) (req *request.Request, output *TestRepositoryTriggersOutput) { op := &request.Operation{ Name: opTestRepositoryTriggers, @@ -394,7 +680,28 @@ func (c *CodeCommit) TestRepositoryTriggers(input *TestRepositoryTriggersInput) const opUpdateDefaultBranch = "UpdateDefaultBranch" -// UpdateDefaultBranchRequest generates a request for the UpdateDefaultBranch operation. +// UpdateDefaultBranchRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDefaultBranch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDefaultBranch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDefaultBranchRequest method. +// req, resp := client.UpdateDefaultBranchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) UpdateDefaultBranchRequest(input *UpdateDefaultBranchInput) (req *request.Request, output *UpdateDefaultBranchOutput) { op := &request.Operation{ Name: opUpdateDefaultBranch, @@ -427,7 +734,28 @@ func (c *CodeCommit) UpdateDefaultBranch(input *UpdateDefaultBranchInput) (*Upda const opUpdateRepositoryDescription = "UpdateRepositoryDescription" -// UpdateRepositoryDescriptionRequest generates a request for the UpdateRepositoryDescription operation. +// UpdateRepositoryDescriptionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRepositoryDescription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRepositoryDescription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRepositoryDescriptionRequest method. +// req, resp := client.UpdateRepositoryDescriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) UpdateRepositoryDescriptionRequest(input *UpdateRepositoryDescriptionInput) (req *request.Request, output *UpdateRepositoryDescriptionOutput) { op := &request.Operation{ Name: opUpdateRepositoryDescription, @@ -462,7 +790,28 @@ func (c *CodeCommit) UpdateRepositoryDescription(input *UpdateRepositoryDescript const opUpdateRepositoryName = "UpdateRepositoryName" -// UpdateRepositoryNameRequest generates a request for the UpdateRepositoryName operation. 
+// UpdateRepositoryNameRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRepositoryName operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRepositoryName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRepositoryNameRequest method. +// req, resp := client.UpdateRepositoryNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeCommit) UpdateRepositoryNameRequest(input *UpdateRepositoryNameInput) (req *request.Request, output *UpdateRepositoryNameOutput) { op := &request.Operation{ Name: opUpdateRepositoryName, diff --git a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go index bf756307a..ec608b7c1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codecommit/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // This is the AWS CodeCommit API Reference. This reference provides descriptions @@ -86,7 +86,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go index fb348cd7f..241f23ab9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/api.go @@ -14,7 +14,28 @@ import ( const opAddTagsToOnPremisesInstances = "AddTagsToOnPremisesInstances" -// AddTagsToOnPremisesInstancesRequest generates a request for the AddTagsToOnPremisesInstances operation. +// AddTagsToOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
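//
// The service.go change above replaces the plain v4.Sign function with the
// named handler v4.SignRequestHandler. Callers can attach their own named
// handlers in the same way; the snippet below is an illustrative sketch (the
// handler name is made up) and assumes a client "svc" plus the aws/request
// and "log" imports.
//
//    svc.Handlers.Build.PushBackNamed(request.NamedHandler{
//        Name: "example.LogOperation", // hypothetical handler name
//        Fn: func(r *request.Request) {
//            log.Printf("building request for %s", r.Operation.Name)
//        },
//    })
//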
+// +// // Example sending a request using the AddTagsToOnPremisesInstancesRequest method. +// req, resp := client.AddTagsToOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) AddTagsToOnPremisesInstancesRequest(input *AddTagsToOnPremisesInstancesInput) (req *request.Request, output *AddTagsToOnPremisesInstancesOutput) { op := &request.Operation{ Name: opAddTagsToOnPremisesInstances, @@ -43,7 +64,28 @@ func (c *CodeDeploy) AddTagsToOnPremisesInstances(input *AddTagsToOnPremisesInst const opBatchGetApplicationRevisions = "BatchGetApplicationRevisions" -// BatchGetApplicationRevisionsRequest generates a request for the BatchGetApplicationRevisions operation. +// BatchGetApplicationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetApplicationRevisions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetApplicationRevisions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetApplicationRevisionsRequest method. +// req, resp := client.BatchGetApplicationRevisionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetApplicationRevisionsRequest(input *BatchGetApplicationRevisionsInput) (req *request.Request, output *BatchGetApplicationRevisionsOutput) { op := &request.Operation{ Name: opBatchGetApplicationRevisions, @@ -70,7 +112,28 @@ func (c *CodeDeploy) BatchGetApplicationRevisions(input *BatchGetApplicationRevi const opBatchGetApplications = "BatchGetApplications" -// BatchGetApplicationsRequest generates a request for the BatchGetApplications operation. +// BatchGetApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetApplicationsRequest method. 
+// req, resp := client.BatchGetApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetApplicationsRequest(input *BatchGetApplicationsInput) (req *request.Request, output *BatchGetApplicationsOutput) { op := &request.Operation{ Name: opBatchGetApplications, @@ -97,7 +160,28 @@ func (c *CodeDeploy) BatchGetApplications(input *BatchGetApplicationsInput) (*Ba const opBatchGetDeploymentGroups = "BatchGetDeploymentGroups" -// BatchGetDeploymentGroupsRequest generates a request for the BatchGetDeploymentGroups operation. +// BatchGetDeploymentGroupsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetDeploymentGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetDeploymentGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetDeploymentGroupsRequest method. +// req, resp := client.BatchGetDeploymentGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetDeploymentGroupsRequest(input *BatchGetDeploymentGroupsInput) (req *request.Request, output *BatchGetDeploymentGroupsOutput) { op := &request.Operation{ Name: opBatchGetDeploymentGroups, @@ -124,7 +208,28 @@ func (c *CodeDeploy) BatchGetDeploymentGroups(input *BatchGetDeploymentGroupsInp const opBatchGetDeploymentInstances = "BatchGetDeploymentInstances" -// BatchGetDeploymentInstancesRequest generates a request for the BatchGetDeploymentInstances operation. +// BatchGetDeploymentInstancesRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetDeploymentInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetDeploymentInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetDeploymentInstancesRequest method. 
+// req, resp := client.BatchGetDeploymentInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetDeploymentInstancesRequest(input *BatchGetDeploymentInstancesInput) (req *request.Request, output *BatchGetDeploymentInstancesOutput) { op := &request.Operation{ Name: opBatchGetDeploymentInstances, @@ -152,7 +257,28 @@ func (c *CodeDeploy) BatchGetDeploymentInstances(input *BatchGetDeploymentInstan const opBatchGetDeployments = "BatchGetDeployments" -// BatchGetDeploymentsRequest generates a request for the BatchGetDeployments operation. +// BatchGetDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetDeploymentsRequest method. +// req, resp := client.BatchGetDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetDeploymentsRequest(input *BatchGetDeploymentsInput) (req *request.Request, output *BatchGetDeploymentsOutput) { op := &request.Operation{ Name: opBatchGetDeployments, @@ -179,7 +305,28 @@ func (c *CodeDeploy) BatchGetDeployments(input *BatchGetDeploymentsInput) (*Batc const opBatchGetOnPremisesInstances = "BatchGetOnPremisesInstances" -// BatchGetOnPremisesInstancesRequest generates a request for the BatchGetOnPremisesInstances operation. +// BatchGetOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetOnPremisesInstancesRequest method. 
+// req, resp := client.BatchGetOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) BatchGetOnPremisesInstancesRequest(input *BatchGetOnPremisesInstancesInput) (req *request.Request, output *BatchGetOnPremisesInstancesOutput) { op := &request.Operation{ Name: opBatchGetOnPremisesInstances, @@ -206,7 +353,28 @@ func (c *CodeDeploy) BatchGetOnPremisesInstances(input *BatchGetOnPremisesInstan const opCreateApplication = "CreateApplication" -// CreateApplicationRequest generates a request for the CreateApplication operation. +// CreateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApplicationRequest method. +// req, resp := client.CreateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *CreateApplicationOutput) { op := &request.Operation{ Name: opCreateApplication, @@ -233,7 +401,28 @@ func (c *CodeDeploy) CreateApplication(input *CreateApplicationInput) (*CreateAp const opCreateDeployment = "CreateDeployment" -// CreateDeploymentRequest generates a request for the CreateDeployment operation. +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { op := &request.Operation{ Name: opCreateDeployment, @@ -260,7 +449,28 @@ func (c *CodeDeploy) CreateDeployment(input *CreateDeploymentInput) (*CreateDepl const opCreateDeploymentConfig = "CreateDeploymentConfig" -// CreateDeploymentConfigRequest generates a request for the CreateDeploymentConfig operation. 
+// CreateDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentConfigRequest method. +// req, resp := client.CreateDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) CreateDeploymentConfigRequest(input *CreateDeploymentConfigInput) (req *request.Request, output *CreateDeploymentConfigOutput) { op := &request.Operation{ Name: opCreateDeploymentConfig, @@ -287,7 +497,28 @@ func (c *CodeDeploy) CreateDeploymentConfig(input *CreateDeploymentConfigInput) const opCreateDeploymentGroup = "CreateDeploymentGroup" -// CreateDeploymentGroupRequest generates a request for the CreateDeploymentGroup operation. +// CreateDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentGroupRequest method. +// req, resp := client.CreateDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) CreateDeploymentGroupRequest(input *CreateDeploymentGroupInput) (req *request.Request, output *CreateDeploymentGroupOutput) { op := &request.Operation{ Name: opCreateDeploymentGroup, @@ -314,7 +545,28 @@ func (c *CodeDeploy) CreateDeploymentGroup(input *CreateDeploymentGroupInput) (* const opDeleteApplication = "DeleteApplication" -// DeleteApplicationRequest generates a request for the DeleteApplication operation. +// DeleteApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplication method directly +// instead. 
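//
// When only the service response matters, the comment above recommends the
// plain DeleteApplication call. A minimal sketch, assuming a CodeDeploy client
// "svc", the aws helper package, and a hypothetical application name:
//
//    params := &codedeploy.DeleteApplicationInput{
//        ApplicationName: aws.String("my-app"), // hypothetical name
//    }
//    if _, err := svc.DeleteApplication(params); err != nil {
//        log.Fatal(err)
//    }
//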
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationRequest method. +// req, resp := client.DeleteApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { op := &request.Operation{ Name: opDeleteApplication, @@ -343,7 +595,28 @@ func (c *CodeDeploy) DeleteApplication(input *DeleteApplicationInput) (*DeleteAp const opDeleteDeploymentConfig = "DeleteDeploymentConfig" -// DeleteDeploymentConfigRequest generates a request for the DeleteDeploymentConfig operation. +// DeleteDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentConfigRequest method. +// req, resp := client.DeleteDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) DeleteDeploymentConfigRequest(input *DeleteDeploymentConfigInput) (req *request.Request, output *DeleteDeploymentConfigOutput) { op := &request.Operation{ Name: opDeleteDeploymentConfig, @@ -375,7 +648,28 @@ func (c *CodeDeploy) DeleteDeploymentConfig(input *DeleteDeploymentConfigInput) const opDeleteDeploymentGroup = "DeleteDeploymentGroup" -// DeleteDeploymentGroupRequest generates a request for the DeleteDeploymentGroup operation. +// DeleteDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeploymentGroupRequest method. 
+// req, resp := client.DeleteDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) DeleteDeploymentGroupRequest(input *DeleteDeploymentGroupInput) (req *request.Request, output *DeleteDeploymentGroupOutput) { op := &request.Operation{ Name: opDeleteDeploymentGroup, @@ -402,7 +696,28 @@ func (c *CodeDeploy) DeleteDeploymentGroup(input *DeleteDeploymentGroupInput) (* const opDeregisterOnPremisesInstance = "DeregisterOnPremisesInstance" -// DeregisterOnPremisesInstanceRequest generates a request for the DeregisterOnPremisesInstance operation. +// DeregisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterOnPremisesInstanceRequest method. +// req, resp := client.DeregisterOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) DeregisterOnPremisesInstanceRequest(input *DeregisterOnPremisesInstanceInput) (req *request.Request, output *DeregisterOnPremisesInstanceOutput) { op := &request.Operation{ Name: opDeregisterOnPremisesInstance, @@ -431,7 +746,28 @@ func (c *CodeDeploy) DeregisterOnPremisesInstance(input *DeregisterOnPremisesIns const opGetApplication = "GetApplication" -// GetApplicationRequest generates a request for the GetApplication operation. +// GetApplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApplicationRequest method. +// req, resp := client.GetApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetApplicationRequest(input *GetApplicationInput) (req *request.Request, output *GetApplicationOutput) { op := &request.Operation{ Name: opGetApplication, @@ -458,7 +794,28 @@ func (c *CodeDeploy) GetApplication(input *GetApplicationInput) (*GetApplication const opGetApplicationRevision = "GetApplicationRevision" -// GetApplicationRevisionRequest generates a request for the GetApplicationRevision operation. 
+// GetApplicationRevisionRequest generates a "aws/request.Request" representing the +// client's request for the GetApplicationRevision operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetApplicationRevision method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetApplicationRevisionRequest method. +// req, resp := client.GetApplicationRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetApplicationRevisionRequest(input *GetApplicationRevisionInput) (req *request.Request, output *GetApplicationRevisionOutput) { op := &request.Operation{ Name: opGetApplicationRevision, @@ -485,7 +842,28 @@ func (c *CodeDeploy) GetApplicationRevision(input *GetApplicationRevisionInput) const opGetDeployment = "GetDeployment" -// GetDeploymentRequest generates a request for the GetDeployment operation. +// GetDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the GetDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentRequest method. +// req, resp := client.GetDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetDeploymentRequest(input *GetDeploymentInput) (req *request.Request, output *GetDeploymentOutput) { op := &request.Operation{ Name: opGetDeployment, @@ -512,7 +890,28 @@ func (c *CodeDeploy) GetDeployment(input *GetDeploymentInput) (*GetDeploymentOut const opGetDeploymentConfig = "GetDeploymentConfig" -// GetDeploymentConfigRequest generates a request for the GetDeploymentConfig operation. +// GetDeploymentConfigRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
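//
// The comment above notes that the request object can be inspected or modified
// before "Send" is called. An illustrative sketch, assuming a client "svc",
// a populated "params" value, and that the extra header is purely for
// demonstration:
//
//    req, resp := svc.GetDeploymentConfigRequest(params)
//
//    // Adjust the underlying HTTP request before it is sent.
//    req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123")
//
//    if err := req.Send(); err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//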
+// +// // Example sending a request using the GetDeploymentConfigRequest method. +// req, resp := client.GetDeploymentConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetDeploymentConfigRequest(input *GetDeploymentConfigInput) (req *request.Request, output *GetDeploymentConfigOutput) { op := &request.Operation{ Name: opGetDeploymentConfig, @@ -539,7 +938,28 @@ func (c *CodeDeploy) GetDeploymentConfig(input *GetDeploymentConfigInput) (*GetD const opGetDeploymentGroup = "GetDeploymentGroup" -// GetDeploymentGroupRequest generates a request for the GetDeploymentGroup operation. +// GetDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentGroupRequest method. +// req, resp := client.GetDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetDeploymentGroupRequest(input *GetDeploymentGroupInput) (req *request.Request, output *GetDeploymentGroupOutput) { op := &request.Operation{ Name: opGetDeploymentGroup, @@ -566,7 +986,28 @@ func (c *CodeDeploy) GetDeploymentGroup(input *GetDeploymentGroupInput) (*GetDep const opGetDeploymentInstance = "GetDeploymentInstance" -// GetDeploymentInstanceRequest generates a request for the GetDeploymentInstance operation. +// GetDeploymentInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetDeploymentInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDeploymentInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDeploymentInstanceRequest method. +// req, resp := client.GetDeploymentInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetDeploymentInstanceRequest(input *GetDeploymentInstanceInput) (req *request.Request, output *GetDeploymentInstanceOutput) { op := &request.Operation{ Name: opGetDeploymentInstance, @@ -593,7 +1034,28 @@ func (c *CodeDeploy) GetDeploymentInstance(input *GetDeploymentInstanceInput) (* const opGetOnPremisesInstance = "GetOnPremisesInstance" -// GetOnPremisesInstanceRequest generates a request for the GetOnPremisesInstance operation. 
+// GetOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOnPremisesInstanceRequest method. +// req, resp := client.GetOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) GetOnPremisesInstanceRequest(input *GetOnPremisesInstanceInput) (req *request.Request, output *GetOnPremisesInstanceOutput) { op := &request.Operation{ Name: opGetOnPremisesInstance, @@ -620,7 +1082,28 @@ func (c *CodeDeploy) GetOnPremisesInstance(input *GetOnPremisesInstanceInput) (* const opListApplicationRevisions = "ListApplicationRevisions" -// ListApplicationRevisionsRequest generates a request for the ListApplicationRevisions operation. +// ListApplicationRevisionsRequest generates a "aws/request.Request" representing the +// client's request for the ListApplicationRevisions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListApplicationRevisions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListApplicationRevisionsRequest method. +// req, resp := client.ListApplicationRevisionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListApplicationRevisionsRequest(input *ListApplicationRevisionsInput) (req *request.Request, output *ListApplicationRevisionsOutput) { op := &request.Operation{ Name: opListApplicationRevisions, @@ -651,6 +1134,23 @@ func (c *CodeDeploy) ListApplicationRevisions(input *ListApplicationRevisionsInp return out, err } +// ListApplicationRevisionsPages iterates over the pages of a ListApplicationRevisions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplicationRevisions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplicationRevisions operation. 
+// pageNum := 0 +// err := client.ListApplicationRevisionsPages(params, +// func(page *ListApplicationRevisionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListApplicationRevisionsPages(input *ListApplicationRevisionsInput, fn func(p *ListApplicationRevisionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListApplicationRevisionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -661,7 +1161,28 @@ func (c *CodeDeploy) ListApplicationRevisionsPages(input *ListApplicationRevisio const opListApplications = "ListApplications" -// ListApplicationsRequest generates a request for the ListApplications operation. +// ListApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the ListApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListApplicationsRequest method. +// req, resp := client.ListApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListApplicationsRequest(input *ListApplicationsInput) (req *request.Request, output *ListApplicationsOutput) { op := &request.Operation{ Name: opListApplications, @@ -692,6 +1213,23 @@ func (c *CodeDeploy) ListApplications(input *ListApplicationsInput) (*ListApplic return out, err } +// ListApplicationsPages iterates over the pages of a ListApplications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListApplications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListApplications operation. +// pageNum := 0 +// err := client.ListApplicationsPages(params, +// func(page *ListApplicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListApplicationsPages(input *ListApplicationsInput, fn func(p *ListApplicationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListApplicationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -702,7 +1240,28 @@ func (c *CodeDeploy) ListApplicationsPages(input *ListApplicationsInput, fn func const opListDeploymentConfigs = "ListDeploymentConfigs" -// ListDeploymentConfigsRequest generates a request for the ListDeploymentConfigs operation. +// ListDeploymentConfigsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeploymentConfigs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeploymentConfigs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentConfigsRequest method. +// req, resp := client.ListDeploymentConfigsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListDeploymentConfigsRequest(input *ListDeploymentConfigsInput) (req *request.Request, output *ListDeploymentConfigsOutput) { op := &request.Operation{ Name: opListDeploymentConfigs, @@ -733,6 +1292,23 @@ func (c *CodeDeploy) ListDeploymentConfigs(input *ListDeploymentConfigsInput) (* return out, err } +// ListDeploymentConfigsPages iterates over the pages of a ListDeploymentConfigs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeploymentConfigs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeploymentConfigs operation. +// pageNum := 0 +// err := client.ListDeploymentConfigsPages(params, +// func(page *ListDeploymentConfigsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListDeploymentConfigsPages(input *ListDeploymentConfigsInput, fn func(p *ListDeploymentConfigsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDeploymentConfigsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -743,7 +1319,28 @@ func (c *CodeDeploy) ListDeploymentConfigsPages(input *ListDeploymentConfigsInpu const opListDeploymentGroups = "ListDeploymentGroups" -// ListDeploymentGroupsRequest generates a request for the ListDeploymentGroups operation. +// ListDeploymentGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeploymentGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeploymentGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentGroupsRequest method. 
+// req, resp := client.ListDeploymentGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListDeploymentGroupsRequest(input *ListDeploymentGroupsInput) (req *request.Request, output *ListDeploymentGroupsOutput) { op := &request.Operation{ Name: opListDeploymentGroups, @@ -775,6 +1372,23 @@ func (c *CodeDeploy) ListDeploymentGroups(input *ListDeploymentGroupsInput) (*Li return out, err } +// ListDeploymentGroupsPages iterates over the pages of a ListDeploymentGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeploymentGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeploymentGroups operation. +// pageNum := 0 +// err := client.ListDeploymentGroupsPages(params, +// func(page *ListDeploymentGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListDeploymentGroupsPages(input *ListDeploymentGroupsInput, fn func(p *ListDeploymentGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDeploymentGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -785,7 +1399,28 @@ func (c *CodeDeploy) ListDeploymentGroupsPages(input *ListDeploymentGroupsInput, const opListDeploymentInstances = "ListDeploymentInstances" -// ListDeploymentInstancesRequest generates a request for the ListDeploymentInstances operation. +// ListDeploymentInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListDeploymentInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeploymentInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentInstancesRequest method. +// req, resp := client.ListDeploymentInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListDeploymentInstancesRequest(input *ListDeploymentInstancesInput) (req *request.Request, output *ListDeploymentInstancesOutput) { op := &request.Operation{ Name: opListDeploymentInstances, @@ -817,6 +1452,23 @@ func (c *CodeDeploy) ListDeploymentInstances(input *ListDeploymentInstancesInput return out, err } +// ListDeploymentInstancesPages iterates over the pages of a ListDeploymentInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeploymentInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeploymentInstances operation. 
+// pageNum := 0 +// err := client.ListDeploymentInstancesPages(params, +// func(page *ListDeploymentInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListDeploymentInstancesPages(input *ListDeploymentInstancesInput, fn func(p *ListDeploymentInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDeploymentInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -827,7 +1479,28 @@ func (c *CodeDeploy) ListDeploymentInstancesPages(input *ListDeploymentInstances const opListDeployments = "ListDeployments" -// ListDeploymentsRequest generates a request for the ListDeployments operation. +// ListDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeploymentsRequest method. +// req, resp := client.ListDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListDeploymentsRequest(input *ListDeploymentsInput) (req *request.Request, output *ListDeploymentsOutput) { op := &request.Operation{ Name: opListDeployments, @@ -859,6 +1532,23 @@ func (c *CodeDeploy) ListDeployments(input *ListDeploymentsInput) (*ListDeployme return out, err } +// ListDeploymentsPages iterates over the pages of a ListDeployments operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListDeployments method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListDeployments operation. +// pageNum := 0 +// err := client.ListDeploymentsPages(params, +// func(page *ListDeploymentsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *CodeDeploy) ListDeploymentsPages(input *ListDeploymentsInput, fn func(p *ListDeploymentsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListDeploymentsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -869,7 +1559,28 @@ func (c *CodeDeploy) ListDeploymentsPages(input *ListDeploymentsInput, fn func(p const opListOnPremisesInstances = "ListOnPremisesInstances" -// ListOnPremisesInstancesRequest generates a request for the ListOnPremisesInstances operation. +// ListOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
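The Pages helpers added in this hunk all follow the same callback-driven shape: the SDK issues as many requests as needed and hands each page to the supplied function, stopping when the function returns false. A minimal sketch of how a consumer might drive the new ListDeploymentsPages helper, assuming a default session and illustrative application and deployment-group names:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/codedeploy"
    )

    func main() {
    	svc := codedeploy.New(session.New())

    	pageNum := 0
    	err := svc.ListDeploymentsPages(&codedeploy.ListDeploymentsInput{
    		ApplicationName:     aws.String("my-app"),     // illustrative
    		DeploymentGroupName: aws.String("my-group"),   // illustrative
    	}, func(page *codedeploy.ListDeploymentsOutput, lastPage bool) bool {
    		pageNum++
    		fmt.Printf("page %d: %d deployment IDs\n", pageNum, len(page.Deployments))
    		return pageNum <= 3 // stop after a few pages, as in the generated doc example
    	})
    	if err != nil {
    		log.Fatalf("ListDeployments failed: %v", err)
    	}
    }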
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOnPremisesInstancesRequest method. +// req, resp := client.ListOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) ListOnPremisesInstancesRequest(input *ListOnPremisesInstancesInput) (req *request.Request, output *ListOnPremisesInstancesOutput) { op := &request.Operation{ Name: opListOnPremisesInstances, @@ -900,7 +1611,28 @@ func (c *CodeDeploy) ListOnPremisesInstances(input *ListOnPremisesInstancesInput const opRegisterApplicationRevision = "RegisterApplicationRevision" -// RegisterApplicationRevisionRequest generates a request for the RegisterApplicationRevision operation. +// RegisterApplicationRevisionRequest generates a "aws/request.Request" representing the +// client's request for the RegisterApplicationRevision operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterApplicationRevision method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterApplicationRevisionRequest method. +// req, resp := client.RegisterApplicationRevisionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) RegisterApplicationRevisionRequest(input *RegisterApplicationRevisionInput) (req *request.Request, output *RegisterApplicationRevisionOutput) { op := &request.Operation{ Name: opRegisterApplicationRevision, @@ -929,7 +1661,28 @@ func (c *CodeDeploy) RegisterApplicationRevision(input *RegisterApplicationRevis const opRegisterOnPremisesInstance = "RegisterOnPremisesInstance" -// RegisterOnPremisesInstanceRequest generates a request for the RegisterOnPremisesInstance operation. +// RegisterOnPremisesInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterOnPremisesInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterOnPremisesInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
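The *Request variants exist precisely so that callers can attach handlers to the request before it is executed and only then call Send. A hedged sketch of that two-step pattern, using ListOnPremisesInstances with an assumed default session and an illustrative debug log line:

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/codedeploy"
    )

    func main() {
    	svc := codedeploy.New(session.New())

    	// Build the request object without sending it yet.
    	req, resp := svc.ListOnPremisesInstancesRequest(&codedeploy.ListOnPremisesInstancesInput{})

    	// Inject custom logic into the request lifecycle via a handler.
    	req.Handlers.Send.PushFront(func(r *request.Request) {
    		log.Printf("[DEBUG] sending %s request to %s", r.Operation.Name, r.ClientInfo.ServiceName)
    	})

    	// Only now is the request actually executed.
    	if err := req.Send(); err != nil {
    		log.Fatalf("ListOnPremisesInstances failed: %v", err)
    	}
    	log.Println(resp)
    }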
+// +// // Example sending a request using the RegisterOnPremisesInstanceRequest method. +// req, resp := client.RegisterOnPremisesInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) RegisterOnPremisesInstanceRequest(input *RegisterOnPremisesInstanceInput) (req *request.Request, output *RegisterOnPremisesInstanceOutput) { op := &request.Operation{ Name: opRegisterOnPremisesInstance, @@ -958,7 +1711,28 @@ func (c *CodeDeploy) RegisterOnPremisesInstance(input *RegisterOnPremisesInstanc const opRemoveTagsFromOnPremisesInstances = "RemoveTagsFromOnPremisesInstances" -// RemoveTagsFromOnPremisesInstancesRequest generates a request for the RemoveTagsFromOnPremisesInstances operation. +// RemoveTagsFromOnPremisesInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromOnPremisesInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromOnPremisesInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromOnPremisesInstancesRequest method. +// req, resp := client.RemoveTagsFromOnPremisesInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) RemoveTagsFromOnPremisesInstancesRequest(input *RemoveTagsFromOnPremisesInstancesInput) (req *request.Request, output *RemoveTagsFromOnPremisesInstancesOutput) { op := &request.Operation{ Name: opRemoveTagsFromOnPremisesInstances, @@ -987,7 +1761,28 @@ func (c *CodeDeploy) RemoveTagsFromOnPremisesInstances(input *RemoveTagsFromOnPr const opStopDeployment = "StopDeployment" -// StopDeploymentRequest generates a request for the StopDeployment operation. +// StopDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the StopDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopDeploymentRequest method. 
+// req, resp := client.StopDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) StopDeploymentRequest(input *StopDeploymentInput) (req *request.Request, output *StopDeploymentOutput) { op := &request.Operation{ Name: opStopDeployment, @@ -1014,7 +1809,28 @@ func (c *CodeDeploy) StopDeployment(input *StopDeploymentInput) (*StopDeployment const opUpdateApplication = "UpdateApplication" -// UpdateApplicationRequest generates a request for the UpdateApplication operation. +// UpdateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationRequest method. +// req, resp := client.UpdateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *UpdateApplicationOutput) { op := &request.Operation{ Name: opUpdateApplication, @@ -1043,7 +1859,28 @@ func (c *CodeDeploy) UpdateApplication(input *UpdateApplicationInput) (*UpdateAp const opUpdateDeploymentGroup = "UpdateDeploymentGroup" -// UpdateDeploymentGroupRequest generates a request for the UpdateDeploymentGroup operation. +// UpdateDeploymentGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDeploymentGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDeploymentGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDeploymentGroupRequest method. 
+// req, resp := client.UpdateDeploymentGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *CodeDeploy) UpdateDeploymentGroupRequest(input *UpdateDeploymentGroupInput) (req *request.Request, output *UpdateDeploymentGroupOutput) { op := &request.Operation{ Name: opUpdateDeploymentGroup, diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go index 6f9e89b9a..e89d8da85 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Overview This reference guide provides descriptions of the AWS CodeDeploy @@ -107,7 +107,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index 8c3b4251c..0cd145ee6 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -11,9 +11,80 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) +const opAddTagsToResource = "AddTagsToResource" + +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { + op := &request.Operation{ + Name: opAddTagsToResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddTagsToResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &AddTagsToResourceOutput{} + req.Data = output + return +} + +// Adds or overwrites one or more tags for the specified Amazon Directory Services +// directory. Each directory can have a maximum of 10 tags. Each tag consists +// of a key and optional value. Tag keys must be unique per resource. 
+func (c *DirectoryService) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { + req, out := c.AddTagsToResourceRequest(input) + err := req.Send() + return out, err +} + const opConnectDirectory = "ConnectDirectory" -// ConnectDirectoryRequest generates a request for the ConnectDirectory operation. +// ConnectDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the ConnectDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConnectDirectory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConnectDirectoryRequest method. +// req, resp := client.ConnectDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) ConnectDirectoryRequest(input *ConnectDirectoryInput) (req *request.Request, output *ConnectDirectoryOutput) { op := &request.Operation{ Name: opConnectDirectory, @@ -40,7 +111,28 @@ func (c *DirectoryService) ConnectDirectory(input *ConnectDirectoryInput) (*Conn const opCreateAlias = "CreateAlias" -// CreateAliasRequest generates a request for the CreateAlias operation. +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { op := &request.Operation{ Name: opCreateAlias, @@ -60,7 +152,7 @@ func (c *DirectoryService) CreateAliasRequest(input *CreateAliasInput) (req *req // Creates an alias for a directory and assigns the alias to the directory. // The alias is used to construct the access URL for the directory, such as -// http://alias.awsapps.com. +// http://.awsapps.com. // // After an alias has been created, it cannot be deleted or reused, so this // operation should only be used when absolutely necessary. @@ -72,7 +164,28 @@ func (c *DirectoryService) CreateAlias(input *CreateAliasInput) (*CreateAliasOut const opCreateComputer = "CreateComputer" -// CreateComputerRequest generates a request for the CreateComputer operation. 
+// CreateComputerRequest generates a "aws/request.Request" representing the +// client's request for the CreateComputer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateComputer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateComputerRequest method. +// req, resp := client.CreateComputerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateComputerRequest(input *CreateComputerInput) (req *request.Request, output *CreateComputerOutput) { op := &request.Operation{ Name: opCreateComputer, @@ -100,7 +213,28 @@ func (c *DirectoryService) CreateComputer(input *CreateComputerInput) (*CreateCo const opCreateConditionalForwarder = "CreateConditionalForwarder" -// CreateConditionalForwarderRequest generates a request for the CreateConditionalForwarder operation. +// CreateConditionalForwarderRequest generates a "aws/request.Request" representing the +// client's request for the CreateConditionalForwarder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateConditionalForwarderRequest method. +// req, resp := client.CreateConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateConditionalForwarderRequest(input *CreateConditionalForwarderInput) (req *request.Request, output *CreateConditionalForwarderOutput) { op := &request.Operation{ Name: opCreateConditionalForwarder, @@ -129,7 +263,28 @@ func (c *DirectoryService) CreateConditionalForwarder(input *CreateConditionalFo const opCreateDirectory = "CreateDirectory" -// CreateDirectoryRequest generates a request for the CreateDirectory operation. +// CreateDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDirectory method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDirectoryRequest method. +// req, resp := client.CreateDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateDirectoryRequest(input *CreateDirectoryInput) (req *request.Request, output *CreateDirectoryOutput) { op := &request.Operation{ Name: opCreateDirectory, @@ -156,7 +311,28 @@ func (c *DirectoryService) CreateDirectory(input *CreateDirectoryInput) (*Create const opCreateMicrosoftAD = "CreateMicrosoftAD" -// CreateMicrosoftADRequest generates a request for the CreateMicrosoftAD operation. +// CreateMicrosoftADRequest generates a "aws/request.Request" representing the +// client's request for the CreateMicrosoftAD operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMicrosoftAD method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMicrosoftADRequest method. +// req, resp := client.CreateMicrosoftADRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateMicrosoftADRequest(input *CreateMicrosoftADInput) (req *request.Request, output *CreateMicrosoftADOutput) { op := &request.Operation{ Name: opCreateMicrosoftAD, @@ -183,7 +359,28 @@ func (c *DirectoryService) CreateMicrosoftAD(input *CreateMicrosoftADInput) (*Cr const opCreateSnapshot = "CreateSnapshot" -// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotRequest method. +// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { op := &request.Operation{ Name: opCreateSnapshot, @@ -212,7 +409,28 @@ func (c *DirectoryService) CreateSnapshot(input *CreateSnapshotInput) (*CreateSn const opCreateTrust = "CreateTrust" -// CreateTrustRequest generates a request for the CreateTrust operation. 
+// CreateTrustRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrustRequest method. +// req, resp := client.CreateTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) CreateTrustRequest(input *CreateTrustInput) (req *request.Request, output *CreateTrustOutput) { op := &request.Operation{ Name: opCreateTrust, @@ -246,7 +464,28 @@ func (c *DirectoryService) CreateTrust(input *CreateTrustInput) (*CreateTrustOut const opDeleteConditionalForwarder = "DeleteConditionalForwarder" -// DeleteConditionalForwarderRequest generates a request for the DeleteConditionalForwarder operation. +// DeleteConditionalForwarderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConditionalForwarder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConditionalForwarderRequest method. +// req, resp := client.DeleteConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DeleteConditionalForwarderRequest(input *DeleteConditionalForwarderInput) (req *request.Request, output *DeleteConditionalForwarderOutput) { op := &request.Operation{ Name: opDeleteConditionalForwarder, @@ -273,7 +512,28 @@ func (c *DirectoryService) DeleteConditionalForwarder(input *DeleteConditionalFo const opDeleteDirectory = "DeleteDirectory" -// DeleteDirectoryRequest generates a request for the DeleteDirectory operation. +// DeleteDirectoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDirectory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDirectory method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDirectoryRequest method. +// req, resp := client.DeleteDirectoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DeleteDirectoryRequest(input *DeleteDirectoryInput) (req *request.Request, output *DeleteDirectoryOutput) { op := &request.Operation{ Name: opDeleteDirectory, @@ -300,7 +560,28 @@ func (c *DirectoryService) DeleteDirectory(input *DeleteDirectoryInput) (*Delete const opDeleteSnapshot = "DeleteSnapshot" -// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. +// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { op := &request.Operation{ Name: opDeleteSnapshot, @@ -327,7 +608,28 @@ func (c *DirectoryService) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSn const opDeleteTrust = "DeleteTrust" -// DeleteTrustRequest generates a request for the DeleteTrust operation. +// DeleteTrustRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrustRequest method. +// req, resp := client.DeleteTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DeleteTrustRequest(input *DeleteTrustInput) (req *request.Request, output *DeleteTrustOutput) { op := &request.Operation{ Name: opDeleteTrust, @@ -355,7 +657,28 @@ func (c *DirectoryService) DeleteTrust(input *DeleteTrustInput) (*DeleteTrustOut const opDeregisterEventTopic = "DeregisterEventTopic" -// DeregisterEventTopicRequest generates a request for the DeregisterEventTopic operation. 
+// DeregisterEventTopicRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterEventTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterEventTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterEventTopicRequest method. +// req, resp := client.DeregisterEventTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DeregisterEventTopicRequest(input *DeregisterEventTopicInput) (req *request.Request, output *DeregisterEventTopicOutput) { op := &request.Operation{ Name: opDeregisterEventTopic, @@ -382,7 +705,28 @@ func (c *DirectoryService) DeregisterEventTopic(input *DeregisterEventTopicInput const opDescribeConditionalForwarders = "DescribeConditionalForwarders" -// DescribeConditionalForwardersRequest generates a request for the DescribeConditionalForwarders operation. +// DescribeConditionalForwardersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConditionalForwarders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConditionalForwarders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConditionalForwardersRequest method. +// req, resp := client.DescribeConditionalForwardersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DescribeConditionalForwardersRequest(input *DescribeConditionalForwardersInput) (req *request.Request, output *DescribeConditionalForwardersOutput) { op := &request.Operation{ Name: opDescribeConditionalForwarders, @@ -412,7 +756,28 @@ func (c *DirectoryService) DescribeConditionalForwarders(input *DescribeConditio const opDescribeDirectories = "DescribeDirectories" -// DescribeDirectoriesRequest generates a request for the DescribeDirectories operation. +// DescribeDirectoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDirectories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeDirectories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDirectoriesRequest method. +// req, resp := client.DescribeDirectoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DescribeDirectoriesRequest(input *DescribeDirectoriesInput) (req *request.Request, output *DescribeDirectoriesOutput) { op := &request.Operation{ Name: opDescribeDirectories, @@ -450,7 +815,28 @@ func (c *DirectoryService) DescribeDirectories(input *DescribeDirectoriesInput) const opDescribeEventTopics = "DescribeEventTopics" -// DescribeEventTopicsRequest generates a request for the DescribeEventTopics operation. +// DescribeEventTopicsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventTopics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventTopics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventTopicsRequest method. +// req, resp := client.DescribeEventTopicsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DescribeEventTopicsRequest(input *DescribeEventTopicsInput) (req *request.Request, output *DescribeEventTopicsOutput) { op := &request.Operation{ Name: opDescribeEventTopics, @@ -481,7 +867,28 @@ func (c *DirectoryService) DescribeEventTopics(input *DescribeEventTopicsInput) const opDescribeSnapshots = "DescribeSnapshots" -// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. 
+// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { op := &request.Operation{ Name: opDescribeSnapshots, @@ -515,7 +922,28 @@ func (c *DirectoryService) DescribeSnapshots(input *DescribeSnapshotsInput) (*De const opDescribeTrusts = "DescribeTrusts" -// DescribeTrustsRequest generates a request for the DescribeTrusts operation. +// DescribeTrustsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTrusts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTrusts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTrustsRequest method. +// req, resp := client.DescribeTrustsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DescribeTrustsRequest(input *DescribeTrustsInput) (req *request.Request, output *DescribeTrustsOutput) { op := &request.Operation{ Name: opDescribeTrusts, @@ -545,7 +973,28 @@ func (c *DirectoryService) DescribeTrusts(input *DescribeTrustsInput) (*Describe const opDisableRadius = "DisableRadius" -// DisableRadiusRequest generates a request for the DisableRadius operation. +// DisableRadiusRequest generates a "aws/request.Request" representing the +// client's request for the DisableRadius operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableRadius method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableRadiusRequest method. +// req, resp := client.DisableRadiusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DisableRadiusRequest(input *DisableRadiusInput) (req *request.Request, output *DisableRadiusOutput) { op := &request.Operation{ Name: opDisableRadius, @@ -573,7 +1022,28 @@ func (c *DirectoryService) DisableRadius(input *DisableRadiusInput) (*DisableRad const opDisableSso = "DisableSso" -// DisableSsoRequest generates a request for the DisableSso operation. +// DisableSsoRequest generates a "aws/request.Request" representing the +// client's request for the DisableSso operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableSso method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableSsoRequest method. +// req, resp := client.DisableSsoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) DisableSsoRequest(input *DisableSsoInput) (req *request.Request, output *DisableSsoOutput) { op := &request.Operation{ Name: opDisableSso, @@ -600,7 +1070,28 @@ func (c *DirectoryService) DisableSso(input *DisableSsoInput) (*DisableSsoOutput const opEnableRadius = "EnableRadius" -// EnableRadiusRequest generates a request for the EnableRadius operation. +// EnableRadiusRequest generates a "aws/request.Request" representing the +// client's request for the EnableRadius operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableRadius method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableRadiusRequest method. +// req, resp := client.EnableRadiusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) EnableRadiusRequest(input *EnableRadiusInput) (req *request.Request, output *EnableRadiusOutput) { op := &request.Operation{ Name: opEnableRadius, @@ -628,7 +1119,28 @@ func (c *DirectoryService) EnableRadius(input *EnableRadiusInput) (*EnableRadius const opEnableSso = "EnableSso" -// EnableSsoRequest generates a request for the EnableSso operation. +// EnableSsoRequest generates a "aws/request.Request" representing the +// client's request for the EnableSso operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableSso method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableSsoRequest method. 
+// req, resp := client.EnableSsoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) EnableSsoRequest(input *EnableSsoInput) (req *request.Request, output *EnableSsoOutput) { op := &request.Operation{ Name: opEnableSso, @@ -655,7 +1167,28 @@ func (c *DirectoryService) EnableSso(input *EnableSsoInput) (*EnableSsoOutput, e const opGetDirectoryLimits = "GetDirectoryLimits" -// GetDirectoryLimitsRequest generates a request for the GetDirectoryLimits operation. +// GetDirectoryLimitsRequest generates a "aws/request.Request" representing the +// client's request for the GetDirectoryLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDirectoryLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDirectoryLimitsRequest method. +// req, resp := client.GetDirectoryLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) GetDirectoryLimitsRequest(input *GetDirectoryLimitsInput) (req *request.Request, output *GetDirectoryLimitsOutput) { op := &request.Operation{ Name: opGetDirectoryLimits, @@ -682,7 +1215,28 @@ func (c *DirectoryService) GetDirectoryLimits(input *GetDirectoryLimitsInput) (* const opGetSnapshotLimits = "GetSnapshotLimits" -// GetSnapshotLimitsRequest generates a request for the GetSnapshotLimits operation. +// GetSnapshotLimitsRequest generates a "aws/request.Request" representing the +// client's request for the GetSnapshotLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSnapshotLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSnapshotLimitsRequest method. +// req, resp := client.GetSnapshotLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) GetSnapshotLimitsRequest(input *GetSnapshotLimitsInput) (req *request.Request, output *GetSnapshotLimitsOutput) { op := &request.Operation{ Name: opGetSnapshotLimits, @@ -707,9 +1261,78 @@ func (c *DirectoryService) GetSnapshotLimits(input *GetSnapshotLimitsInput) (*Ge return out, err } +const opListTagsForResource = "ListTagsForResource" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// Lists all tags on an Amazon Directory Services directory. +func (c *DirectoryService) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + const opRegisterEventTopic = "RegisterEventTopic" -// RegisterEventTopicRequest generates a request for the RegisterEventTopic operation. +// RegisterEventTopicRequest generates a "aws/request.Request" representing the +// client's request for the RegisterEventTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterEventTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterEventTopicRequest method. +// req, resp := client.RegisterEventTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) RegisterEventTopicRequest(input *RegisterEventTopicInput) (req *request.Request, output *RegisterEventTopicOutput) { op := &request.Operation{ Name: opRegisterEventTopic, @@ -739,9 +1362,78 @@ func (c *DirectoryService) RegisterEventTopic(input *RegisterEventTopicInput) (* return out, err } +const opRemoveTagsFromResource = "RemoveTagsFromResource" + +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. 
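Together with ListTagsForResource above, this gives a full read/remove cycle for directory tags. A hedged sketch follows; the directory ID and tag key are illustrative, and the input field names (ResourceId, TagKeys) are assumed from the Directory Service API rather than shown in this hunk:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/directoryservice"
    )

    func main() {
    	svc := directoryservice.New(session.New())
    	dirID := aws.String("d-1234567890") // illustrative directory ID

    	// List the tags currently on the directory.
    	out, err := svc.ListTagsForResource(&directoryservice.ListTagsForResourceInput{ResourceId: dirID})
    	if err != nil {
    		log.Fatalf("ListTagsForResource failed: %v", err)
    	}
    	for _, tag := range out.Tags {
    		fmt.Printf("%s=%s\n", aws.StringValue(tag.Key), aws.StringValue(tag.Value))
    	}

    	// Remove a tag by key.
    	_, err = svc.RemoveTagsFromResource(&directoryservice.RemoveTagsFromResourceInput{
    		ResourceId: dirID,
    		TagKeys:    []*string{aws.String("Environment")},
    	})
    	if err != nil {
    		log.Fatalf("RemoveTagsFromResource failed: %v", err)
    	}
    }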
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { + op := &request.Operation{ + Name: opRemoveTagsFromResource, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveTagsFromResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveTagsFromResourceOutput{} + req.Data = output + return +} + +// Removes tags from an Amazon Directory Services directory. +func (c *DirectoryService) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*RemoveTagsFromResourceOutput, error) { + req, out := c.RemoveTagsFromResourceRequest(input) + err := req.Send() + return out, err +} + const opRestoreFromSnapshot = "RestoreFromSnapshot" -// RestoreFromSnapshotRequest generates a request for the RestoreFromSnapshot operation. +// RestoreFromSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreFromSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreFromSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreFromSnapshotRequest method. +// req, resp := client.RestoreFromSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) RestoreFromSnapshotRequest(input *RestoreFromSnapshotInput) (req *request.Request, output *RestoreFromSnapshotOutput) { op := &request.Operation{ Name: opRestoreFromSnapshot, @@ -776,7 +1468,28 @@ func (c *DirectoryService) RestoreFromSnapshot(input *RestoreFromSnapshotInput) const opUpdateConditionalForwarder = "UpdateConditionalForwarder" -// UpdateConditionalForwarderRequest generates a request for the UpdateConditionalForwarder operation. +// UpdateConditionalForwarderRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConditionalForwarder operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateConditionalForwarder method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateConditionalForwarderRequest method. 
+// req, resp := client.UpdateConditionalForwarderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) UpdateConditionalForwarderRequest(input *UpdateConditionalForwarderInput) (req *request.Request, output *UpdateConditionalForwarderOutput) { op := &request.Operation{ Name: opUpdateConditionalForwarder, @@ -803,7 +1516,28 @@ func (c *DirectoryService) UpdateConditionalForwarder(input *UpdateConditionalFo const opUpdateRadius = "UpdateRadius" -// UpdateRadiusRequest generates a request for the UpdateRadius operation. +// UpdateRadiusRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRadius operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRadius method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRadiusRequest method. +// req, resp := client.UpdateRadiusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) UpdateRadiusRequest(input *UpdateRadiusInput) (req *request.Request, output *UpdateRadiusOutput) { op := &request.Operation{ Name: opUpdateRadius, @@ -831,7 +1565,28 @@ func (c *DirectoryService) UpdateRadius(input *UpdateRadiusInput) (*UpdateRadius const opVerifyTrust = "VerifyTrust" -// VerifyTrustRequest generates a request for the VerifyTrust operation. +// VerifyTrustRequest generates a "aws/request.Request" representing the +// client's request for the VerifyTrust operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyTrust method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyTrustRequest method. +// req, resp := client.VerifyTrustRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DirectoryService) VerifyTrustRequest(input *VerifyTrustInput) (req *request.Request, output *VerifyTrustOutput) { op := &request.Operation{ Name: opVerifyTrust, @@ -860,6 +1615,66 @@ func (c *DirectoryService) VerifyTrust(input *VerifyTrustInput) (*VerifyTrustOut return out, err } +type AddTagsToResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the directory to which to add the tag. + ResourceId *string `type:"string" required:"true"` + + // The tags to be assigned to the Amazon Directory Services directory. 
+ Tags []*Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s AddTagsToResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsToResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsToResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddTagsToResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddTagsToResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddTagsToResourceOutput) GoString() string { + return s.String() +} + // Represents a named directory attribute. type Attribute struct { _ struct{} `type:"structure"` @@ -1232,6 +2047,7 @@ func (s *CreateConditionalForwarderInput) Validate() error { return nil } +// The result of a CreateConditinalForwarder request. type CreateConditionalForwarderOutput struct { _ struct{} `type:"structure"` } @@ -1383,6 +2199,7 @@ func (s *CreateMicrosoftADInput) Validate() error { return nil } +// Result of a CreateMicrosoftAD request. type CreateMicrosoftADOutput struct { _ struct{} `type:"structure"` @@ -1463,6 +2280,7 @@ func (s CreateSnapshotOutput) GoString() string { type CreateTrustInput struct { _ struct{} `type:"structure"` + // The IP addresses of the remote DNS server associated with RemoteDomainName. ConditionalForwarderIpAddrs []*string `type:"list"` // The Directory ID of the Microsoft AD in the AWS cloud for which to establish @@ -1519,6 +2337,7 @@ func (s *CreateTrustInput) Validate() error { return nil } +// The result of a CreateTrust request. type CreateTrustOutput struct { _ struct{} `type:"structure"` @@ -1536,6 +2355,7 @@ func (s CreateTrustOutput) GoString() string { return s.String() } +// Deletes a conditional forwarder. type DeleteConditionalForwarderInput struct { _ struct{} `type:"structure"` @@ -1573,6 +2393,7 @@ func (s *DeleteConditionalForwarderInput) Validate() error { return nil } +// The result of a DeleteConditionalForwarder request. type DeleteConditionalForwarderOutput struct { _ struct{} `type:"structure"` } @@ -1690,6 +2511,7 @@ func (s DeleteSnapshotOutput) GoString() string { type DeleteTrustInput struct { _ struct{} `type:"structure"` + // Delete a conditional forwarder as part of a DeleteTrustRequest. DeleteAssociatedConditionalForwarder *bool `type:"boolean"` // The Trust ID of the trust relationship to be deleted. @@ -1719,6 +2541,7 @@ func (s *DeleteTrustInput) Validate() error { return nil } +// The result of a DeleteTrust request. type DeleteTrustOutput struct { _ struct{} `type:"structure"` @@ -1777,6 +2600,7 @@ func (s *DeregisterEventTopicInput) Validate() error { return nil } +// The result of a DeregisterEventTopic request. 
type DeregisterEventTopicOutput struct { _ struct{} `type:"structure"` } @@ -1791,6 +2615,7 @@ func (s DeregisterEventTopicOutput) GoString() string { return s.String() } +// Describes a conditional forwarder. type DescribeConditionalForwardersInput struct { _ struct{} `type:"structure"` @@ -1826,6 +2651,7 @@ func (s *DescribeConditionalForwardersInput) Validate() error { return nil } +// The result of a DescribeConditionalForwarder request. type DescribeConditionalForwardersOutput struct { _ struct{} `type:"structure"` @@ -1901,6 +2727,7 @@ func (s DescribeDirectoriesOutput) GoString() string { return s.String() } +// Describes event topics. type DescribeEventTopicsInput struct { _ struct{} `type:"structure"` @@ -1925,6 +2752,7 @@ func (s DescribeEventTopicsInput) GoString() string { return s.String() } +// The result of a DescribeEventTopic request. type DescribeEventTopicsOutput struct { _ struct{} `type:"structure"` @@ -2035,6 +2863,7 @@ func (s DescribeTrustsInput) GoString() string { return s.String() } +// The result of a DescribeTrust request. type DescribeTrustsOutput struct { _ struct{} `type:"structure"` @@ -2074,7 +2903,11 @@ type DirectoryConnectSettings struct { // The username of an account in the on-premises directory that is used to connect // to the directory. This account must have the following privileges: // - // Read users and groups Create computer objects Join computers to the domain + // Read users and groups + // + // Create computer objects + // + // Join computers to the domain CustomerUserName *string `min:"1" type:"string" required:"true"` // A list of subnet identifiers in the VPC in which the AD Connector is created. @@ -2156,8 +2989,8 @@ func (s DirectoryConnectSettingsDescription) GoString() string { type DirectoryDescription struct { _ struct{} `type:"structure"` - // The access URL for the directory, such as http://alias.awsapps.com. If no - // alias has been created for the directory, alias is the directory identifier, + // The access URL for the directory, such as http://.awsapps.com. If + // no alias has been created for the directory, is the directory identifier, // such as d-XXXXXXXXXX. AccessUrl *string `min:"1" type:"string"` @@ -2699,6 +3532,62 @@ func (s GetSnapshotLimitsOutput) GoString() string { return s.String() } +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + Limit *int64 `type:"integer"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // The ID of the directory for which you want to retrieve tags. + ResourceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure"` + + // Reserved for future use. + NextToken *string `type:"string"` + + // List of tags returned by the ListTagsForResource operation. 
+ Tags []*Tag `type:"list"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + // Contains information about a Remote Authentication Dial In User Service (RADIUS) // server. type RadiusSettings struct { @@ -2766,6 +3655,7 @@ func (s *RadiusSettings) Validate() error { return nil } +// Registers a new event topic. type RegisterEventTopicInput struct { _ struct{} `type:"structure"` @@ -2806,6 +3696,7 @@ func (s *RegisterEventTopicInput) Validate() error { return nil } +// The result of a RegisterEventTopic request. type RegisterEventTopicOutput struct { _ struct{} `type:"structure"` } @@ -2820,6 +3711,56 @@ func (s RegisterEventTopicOutput) GoString() string { return s.String() } +type RemoveTagsFromResourceInput struct { + _ struct{} `type:"structure"` + + // The ID of the directory from which to remove the tag. + ResourceId *string `type:"string" required:"true"` + + // The tag key (name) of the tag to be removed. + TagKeys []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsFromResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsFromResourceInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveTagsFromResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveTagsFromResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveTagsFromResourceOutput) GoString() string { + return s.String() +} + // An object representing the inputs for the RestoreFromSnapshot operation. type RestoreFromSnapshotInput struct { _ struct{} `type:"structure"` @@ -2923,6 +3864,53 @@ func (s SnapshotLimits) GoString() string { return s.String() } +// Metadata assigned to an Amazon Directory Services directory consisting of +// a key-value pair. +type Tag struct { + _ struct{} `type:"structure"` + + // A key is the required name of the tag. The string value can be from 1 to + // 128 Unicode characters in length and cannot be prefixed with "aws:". The + // string can only contain only the set of Unicode letters, digits, white-space, + // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + Key *string `min:"1" type:"string" required:"true"` + + // A value is the optional value of the tag. The string value can be from 1 + // to 256 Unicode characters in length. The string can only contain only the + // set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' + // (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). 
+ Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Describes a trust relationship between an Microsoft AD in the AWS cloud and // an external domain. type Trust struct { @@ -2970,6 +3958,7 @@ func (s Trust) GoString() string { return s.String() } +// Updates a conditional forwarder. type UpdateConditionalForwarderInput struct { _ struct{} `type:"structure"` @@ -3015,6 +4004,7 @@ func (s *UpdateConditionalForwarderInput) Validate() error { return nil } +// The result of an UpdateConditionalForwarder request. type UpdateConditionalForwarderOutput struct { _ struct{} `type:"structure"` } @@ -3118,6 +4108,7 @@ func (s *VerifyTrustInput) Validate() error { return nil } +// Result of a VerifyTrust request. type VerifyTrustOutput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go index 9eb1b1a91..62acf33ce 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // This is the AWS Directory Service API Reference. This guide provides detailed @@ -62,7 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index b4cc4e1cc..3e9d97cbc 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -13,7 +13,28 @@ import ( const opBatchGetItem = "BatchGetItem" -// BatchGetItemRequest generates a request for the BatchGetItem operation. +// BatchGetItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetItemRequest method. +// req, resp := client.BatchGetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.Request, output *BatchGetItemOutput) { op := &request.Operation{ Name: opBatchGetItem, @@ -47,10 +68,10 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // operation returns a value for UnprocessedKeys. You can use this value to // retry the operation starting with the next item to get. // -// If you request more than 100 items BatchGetItem will return a ValidationException +// If you request more than 100 items BatchGetItem will return a ValidationException // with the message "Too many items requested for the BatchGetItem call". // -// For example, if you ask to retrieve 100 items, but each individual item +// For example, if you ask to retrieve 100 items, but each individual item // is 300 KB in size, the system returns 52 items (so as not to exceed the 16 // MB limit). It also returns an appropriate UnprocessedKeys value so you can // get the next page of results. If desired, your application can include its @@ -78,7 +99,7 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R // In order to minimize response latency, BatchGetItem retrieves items in parallel. // // When designing your application, keep in mind that DynamoDB does not return -// attributes in any particular order. To help parse the response by item, include +// items in any particular order. To help parse the response by item, include // the primary key values for the items in your request in the AttributesToGet // parameter. // @@ -92,6 +113,23 @@ func (c *DynamoDB) BatchGetItem(input *BatchGetItemInput) (*BatchGetItemOutput, return out, err } +// BatchGetItemPages iterates over the pages of a BatchGetItem operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See BatchGetItem method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a BatchGetItem operation. +// pageNum := 0 +// err := client.BatchGetItemPages(params, +// func(page *BatchGetItemOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(p *BatchGetItemOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.BatchGetItemRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -102,7 +140,28 @@ func (c *DynamoDB) BatchGetItemPages(input *BatchGetItemInput, fn func(p *BatchG const opBatchWriteItem = "BatchWriteItem" -// BatchWriteItemRequest generates a request for the BatchWriteItem operation. 
+// BatchWriteItemRequest generates a "aws/request.Request" representing the +// client's request for the BatchWriteItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchWriteItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchWriteItemRequest method. +// req, resp := client.BatchWriteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *request.Request, output *BatchWriteItemOutput) { op := &request.Operation{ Name: opBatchWriteItem, @@ -125,7 +184,7 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // can comprise as many as 25 put or delete requests. Individual items to be // written can be as large as 400 KB. // -// BatchWriteItem cannot update items. To update items, use the UpdateItem +// BatchWriteItem cannot update items. To update items, use the UpdateItem // API. // // The individual PutItem and DeleteItem operations specified in BatchWriteItem @@ -186,9 +245,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque // // There are more than 25 requests in the batch. // -// Any individual item in a batch exceeds 400 KB. +// Any individual item in a batch exceeds 400 KB. // -// The total request size exceeds 16 MB. +// The total request size exceeds 16 MB. func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOutput, error) { req, out := c.BatchWriteItemRequest(input) err := req.Send() @@ -197,7 +256,28 @@ func (c *DynamoDB) BatchWriteItem(input *BatchWriteItemInput) (*BatchWriteItemOu const opCreateTable = "CreateTable" -// CreateTableRequest generates a request for the CreateTable operation. +// CreateTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTableRequest method. +// req, resp := client.CreateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Request, output *CreateTableOutput) { op := &request.Operation{ Name: opCreateTable, @@ -219,10 +299,10 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req // table names must be unique within each region. 
That is, you can have two // tables with same name if you create the tables in different regions. // -// CreateTable is an asynchronous operation. Upon receiving a CreateTable request, -// DynamoDB immediately returns a response with a TableStatus of CREATING. After -// the table is created, DynamoDB sets the TableStatus to ACTIVE. You can perform -// read and write operations only on an ACTIVE table. +// CreateTable is an asynchronous operation. Upon receiving a CreateTable +// request, DynamoDB immediately returns a response with a TableStatus of CREATING. +// After the table is created, DynamoDB sets the TableStatus to ACTIVE. You +// can perform read and write operations only on an ACTIVE table. // // You can optionally define secondary indexes on the new table, as part of // the CreateTable operation. If you want to create multiple tables with secondary @@ -238,7 +318,28 @@ func (c *DynamoDB) CreateTable(input *CreateTableInput) (*CreateTableOutput, err const opDeleteItem = "DeleteItem" -// DeleteItemRequest generates a request for the DeleteItem operation. +// DeleteItemRequest generates a "aws/request.Request" representing the +// client's request for the DeleteItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteItemRequest method. +// req, resp := client.DeleteItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Request, output *DeleteItemOutput) { op := &request.Operation{ Name: opDeleteItem, @@ -278,7 +379,28 @@ func (c *DynamoDB) DeleteItem(input *DeleteItemInput) (*DeleteItemOutput, error) const opDeleteTable = "DeleteTable" -// DeleteTableRequest generates a request for the DeleteTable operation. +// DeleteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTableRequest method. 
+// req, resp := client.DeleteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Request, output *DeleteTableOutput) { op := &request.Operation{ Name: opDeleteTable, @@ -322,7 +444,28 @@ func (c *DynamoDB) DeleteTable(input *DeleteTableInput) (*DeleteTableOutput, err const opDescribeLimits = "DescribeLimits" -// DescribeLimitsRequest generates a request for the DescribeLimits operation. +// DescribeLimitsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLimits operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLimits method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLimitsRequest method. +// req, resp := client.DescribeLimitsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *request.Request, output *DescribeLimitsOutput) { op := &request.Operation{ Name: opDescribeLimits, @@ -357,23 +500,33 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // the capacity you are currently using to those limits imposed by your account // so that you have enough time to apply for an increase before you hit a limit. // -// For example, you could use one of the AWS SDKs to do the following: +// For example, you could use one of the AWS SDKs to do the following: // // Call DescribeLimits for a particular region to obtain your current account -// limits on provisioned capacity there. Create a variable to hold the aggregate -// read capacity units provisioned for all your tables in that region, and one -// to hold the aggregate write capacity units. Zero them both. Call ListTables -// to obtain a list of all your DynamoDB tables. For each table name listed -// by ListTables, do the following: +// limits on provisioned capacity there. // -// Call DescribeTable with the table name. Use the data returned by DescribeTable -// to add the read capacity units and write capacity units provisioned for the -// table itself to your variables. If the table has one or more global secondary -// indexes (GSIs), loop over these GSIs and add their provisioned capacity values -// to your variables as well. Report the account limits for that region returned -// by DescribeLimits, along with the total current provisioned capacity levels -// you have calculated. This will let you see whether you are getting close -// to your account-level limits. +// Create a variable to hold the aggregate read capacity units provisioned +// for all your tables in that region, and one to hold the aggregate write capacity +// units. Zero them both. +// +// Call ListTables to obtain a list of all your DynamoDB tables. +// +// For each table name listed by ListTables, do the following: +// +// Call DescribeTable with the table name. 
+// +// Use the data returned by DescribeTable to add the read capacity units and +// write capacity units provisioned for the table itself to your variables. +// +// If the table has one or more global secondary indexes (GSIs), loop over +// these GSIs and add their provisioned capacity values to your variables as +// well. +// +// Report the account limits for that region returned by DescribeLimits, +// along with the total current provisioned capacity levels you have calculated. +// +// This will let you see whether you are getting close to your account-level +// limits. // // The per-table limits apply only when you are creating a new table. They // restrict the sum of the provisioned capacity of the new table itself and @@ -384,7 +537,7 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque // the aggregate provisioned capacity over all your tables and GSIs cannot exceed // either of the per-account limits. // -// DescribeLimits should only be called periodically. You can expect throttling +// DescribeLimits should only be called periodically. You can expect throttling // errors if you call it more than once in a minute. // // The DescribeLimits Request element has no content. @@ -396,7 +549,28 @@ func (c *DynamoDB) DescribeLimits(input *DescribeLimitsInput) (*DescribeLimitsOu const opDescribeTable = "DescribeTable" -// DescribeTableRequest generates a request for the DescribeTable operation. +// DescribeTableRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTableRequest method. +// req, resp := client.DescribeTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) DescribeTableRequest(input *DescribeTableInput) (req *request.Request, output *DescribeTableOutput) { op := &request.Operation{ Name: opDescribeTable, @@ -431,7 +605,28 @@ func (c *DynamoDB) DescribeTable(input *DescribeTableInput) (*DescribeTableOutpu const opGetItem = "GetItem" -// GetItemRequest generates a request for the GetItem operation. +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetItemRequest method. 
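The DescribeLimits documentation above spells out an aggregation procedure: fetch the account limits, then sum the provisioned read and write capacity of every table and global secondary index via ListTables and DescribeTable. The sketch below follows that procedure using the paginated ListTablesPages helper; the region and the error handling are illustrative only.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	svc := dynamodb.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	limits, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
	if err != nil {
		log.Fatal(err)
	}

	// Aggregate provisioned capacity across every table and GSI in the region.
	var readUnits, writeUnits int64
	err = svc.ListTablesPages(&dynamodb.ListTablesInput{}, func(page *dynamodb.ListTablesOutput, lastPage bool) bool {
		for _, name := range page.TableNames {
			out, derr := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: name})
			if derr != nil {
				log.Printf("DescribeTable %s: %v", *name, derr)
				continue
			}
			t := out.Table
			readUnits += *t.ProvisionedThroughput.ReadCapacityUnits
			writeUnits += *t.ProvisionedThroughput.WriteCapacityUnits
			for _, gsi := range t.GlobalSecondaryIndexes {
				readUnits += *gsi.ProvisionedThroughput.ReadCapacityUnits
				writeUnits += *gsi.ProvisionedThroughput.WriteCapacityUnits
			}
		}
		return !lastPage
	})
	if err != nil {
		log.Fatal(err)
	}

	fmt.Printf("account limits: %d read / %d write; in use: %d read / %d write\n",
		*limits.AccountMaxReadCapacityUnits, *limits.AccountMaxWriteCapacityUnits,
		readUnits, writeUnits)
}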
+// req, resp := client.GetItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { op := &request.Operation{ Name: opGetItem, @@ -452,7 +647,7 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou // The GetItem operation returns a set of attributes for the item with the given // primary key. If there is no matching item, GetItem does not return any data. // -// GetItem provides an eventually consistent read by default. If your application +// GetItem provides an eventually consistent read by default. If your application // requires a strongly consistent read, set ConsistentRead to true. Although // a strongly consistent read might take more time than an eventually consistent // read, it always returns the last updated value. @@ -464,7 +659,28 @@ func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { const opListTables = "ListTables" -// ListTablesRequest generates a request for the ListTables operation. +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTables method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { op := &request.Operation{ Name: opListTables, @@ -497,6 +713,23 @@ func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) return out, err } +// ListTablesPages iterates over the pages of a ListTables operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTables method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTables operation. +// pageNum := 0 +// err := client.ListTablesPages(params, +// func(page *ListTablesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(p *ListTablesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListTablesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -507,7 +740,28 @@ func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(p *ListTables const opPutItem = "PutItem" -// PutItemRequest generates a request for the PutItem operation. +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { op := &request.Operation{ Name: opPutItem, @@ -560,7 +814,28 @@ func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { const opQuery = "Query" -// QueryRequest generates a request for the Query operation. +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Query method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { op := &request.Operation{ Name: opQuery, @@ -601,8 +876,9 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output // set size limit of 1 MB, the query stops and results are returned to the user // with the LastEvaluatedKey element to continue the query in a subsequent operation. // Unlike a Scan operation, a Query operation never returns both an empty result -// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if the -// results exceed 1 MB, or if you have used the Limit parameter. +// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you +// have used the Limit parameter, or if the result set exceeds 1 MB (prior to +// applying a filter). // // You can query a table, a local secondary index, or a global secondary index. // For a query on a table or on a local secondary index, you can set the ConsistentRead @@ -615,6 +891,23 @@ func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { return out, err } +// QueryPages iterates over the pages of a Query operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Query method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Query operation. 
+// pageNum := 0 +// err := client.QueryPages(params, +// func(page *QueryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *DynamoDB) QueryPages(input *QueryInput, fn func(p *QueryOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.QueryRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -625,7 +918,28 @@ func (c *DynamoDB) QueryPages(input *QueryInput, fn func(p *QueryOutput, lastPag const opScan = "Scan" -// ScanRequest generates a request for the Scan operation. +// ScanRequest generates a "aws/request.Request" representing the +// client's request for the Scan operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Scan method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScanRequest method. +// req, resp := client.ScanRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { op := &request.Operation{ Name: opScan, @@ -676,6 +990,23 @@ func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { return out, err } +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *DynamoDB) ScanPages(input *ScanInput, fn func(p *ScanOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ScanRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -686,7 +1017,28 @@ func (c *DynamoDB) ScanPages(input *ScanInput, fn func(p *ScanOutput, lastPage b const opUpdateItem = "UpdateItem" -// UpdateItemRequest generates a request for the UpdateItem operation. +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateItem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateItemRequest method. 
+// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { op := &request.Operation{ Name: opUpdateItem, @@ -720,7 +1072,28 @@ func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) const opUpdateTable = "UpdateTable" -// UpdateTableRequest generates a request for the UpdateTable operation. +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { op := &request.Operation{ Name: opUpdateTable, @@ -743,17 +1116,17 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req // // You can only perform one of the following operations at once: // -// Modify the provisioned throughput settings of the table. +// Modify the provisioned throughput settings of the table. // -// Enable or disable Streams on the table. +// Enable or disable Streams on the table. // -// Remove a global secondary index from the table. +// Remove a global secondary index from the table. // -// Create a new global secondary index on the table. Once the index begins +// Create a new global secondary index on the table. Once the index begins // backfilling, you can use UpdateTable to perform other operations. // -// UpdateTable is an asynchronous operation; while it is executing, the table -// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// UpdateTable is an asynchronous operation; while it is executing, the +// table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot // issue another UpdateTable request. When the table returns to the ACTIVE state, // the UpdateTable operation is complete. func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { @@ -771,8 +1144,11 @@ type AttributeDefinition struct { // The data type for the attribute, where: // - // S - the attribute is of type String N - the attribute is of type Number - // B - the attribute is of type Binary + // S - the attribute is of type String + // + // N - the attribute is of type Number + // + // B - the attribute is of type Binary AttributeType *string `type:"string" required:"true" enum:"ScalarAttributeType"` } @@ -877,10 +1253,10 @@ type AttributeValueUpdate struct { // // If an item with the specified Key is found in the table: // - // PUT - Adds the specified attribute to the item. If the attribute already + // PUT - Adds the specified attribute to the item. 
If the attribute already // exists, it is replaced by the new value. // - // DELETE - If no value is specified, the attribute and its value are removed + // DELETE - If no value is specified, the attribute and its value are removed // from the item. The data type of the specified value must match the existing // value's data type. // @@ -889,7 +1265,7 @@ type AttributeValueUpdate struct { // DELETE action specified [a,c], then the final attribute value would be [b]. // Specifying an empty set is an error. // - // ADD - If the attribute does not already exist, then the attribute and + // ADD - If the attribute does not already exist, then the attribute and // its values are added to the item. If the attribute does exist, then the behavior // of ADD depends on the data type of the attribute: // @@ -925,12 +1301,12 @@ type AttributeValueUpdate struct { // // If no item with the specified Key is found: // - // PUT - DynamoDB creates a new item with the specified primary key, and + // PUT - DynamoDB creates a new item with the specified primary key, and // then adds the attribute. // - // DELETE - Nothing happens; there is no attribute to delete. + // DELETE - Nothing happens; there is no attribute to delete. // - // ADD - DynamoDB creates an item with the supplied primary key and number + // ADD - DynamoDB creates an item with the supplied primary key and number // (or set of numbers) for the attribute value. The only data types allowed // are number and number set; no other data types can be specified. Action *string `type:"string" enum:"AttributeAction"` @@ -965,7 +1341,7 @@ type BatchGetItemInput struct { // // Each element in the map of items to retrieve consists of the following: // - // ConsistentRead - If true, a strongly consistent read is used; if false + // ConsistentRead - If true, a strongly consistent read is used; if false // (the default), an eventually consistent read is used. // // ExpressionAttributeNames - One or more substitution tokens for attribute @@ -983,34 +1359,34 @@ type BatchGetItemInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. 
// - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // Keys - An array of primary key attribute values that define specific items - // in the table. For each primary key, you must provide all of the key attributes. - // For example, with a simple primary key, you only need to provide the partition - // key value. For a composite key, you must provide both the partition key value - // and the sort key value. + // Keys - An array of primary key attribute values that define specific + // items in the table. For each primary key, you must provide all of the key + // attributes. For example, with a simple primary key, you only need to provide + // the partition key value. For a composite key, you must provide both the partition + // key value and the sort key value. // - // ProjectionExpression - A string that identifies one or more attributes + // ProjectionExpression - A string that identifies one or more attributes // to retrieve from the table. These attributes can include scalars, sets, or // elements of a JSON document. The attributes in the expression must be separated // by commas. @@ -1032,9 +1408,9 @@ type BatchGetItemInput struct { // This parameter allows you to retrieve attributes of type List or Map; however, // it cannot retrieve individual elements within a List or a Map. // - // The names of one or more attributes to retrieve. If no attribute names are - // provided, then all attributes will be returned. If any of the requested attributes - // are not found, they will not appear in the result. + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. // // Note that AttributesToGet has no effect on provisioned throughput consumption. // DynamoDB determines capacity units consumed based on item size, not on the @@ -1044,7 +1420,7 @@ type BatchGetItemInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -1052,10 +1428,10 @@ type BatchGetItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` } @@ -1103,9 +1479,9 @@ type BatchGetItemOutput struct { // // Each element consists of: // - // TableName - The table that consumed the provisioned throughput. 
+ // TableName - The table that consumed the provisioned throughput. // - // CapacityUnits - The total number of capacity units consumed. + // CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*ConsumedCapacity `type:"list"` // A map of table name to a list of items. Each object in Responses consists @@ -1120,14 +1496,14 @@ type BatchGetItemOutput struct { // // Each element consists of: // - // Keys - An array of primary key attribute values that define specific items - // in the table. + // Keys - An array of primary key attribute values that define specific + // items in the table. // - // AttributesToGet - One or more attributes to be retrieved from the table + // AttributesToGet - One or more attributes to be retrieved from the table // or index. By default, all attributes are returned. If a requested attribute // is not found, it does not appear in the result. // - // ConsistentRead - The consistency of a read operation. If set to true, + // ConsistentRead - The consistency of a read operation. If set to true, // then a strongly consistent read is used; otherwise, an eventually consistent // read is used. // @@ -1154,20 +1530,20 @@ type BatchWriteItemInput struct { // to be performed (DeleteRequest or PutRequest). Each element in the map consists // of the following: // - // DeleteRequest - Perform a DeleteItem operation on the specified item. + // DeleteRequest - Perform a DeleteItem operation on the specified item. // The item to be deleted is identified by a Key subelement: // - // Key - A map of primary key attribute values that uniquely identify the + // Key - A map of primary key attribute values that uniquely identify the // ! item. Each entry in this map consists of an attribute name and an attribute // value. For each primary key, you must provide all of the key attributes. // For example, with a simple primary key, you only need to provide a value // for the partition key. For a composite primary key, you must provide values // for both the partition key and the sort key. // - // PutRequest - Perform a PutItem operation on the specified item. The + // PutRequest - Perform a PutItem operation on the specified item. The // item to be put is identified by an Item subelement: // - // Item - A map of attributes and their values. Each entry in this map consists + // Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be // null; string and binary type attributes must have lengths greater than zero; // and set type attributes must not be empty. Requests that contain empty values @@ -1181,7 +1557,7 @@ type BatchWriteItemInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -1189,10 +1565,10 @@ type BatchWriteItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. 
+ // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // Determines whether item collection metrics are returned. If set to SIZE, @@ -1236,9 +1612,9 @@ type BatchWriteItemOutput struct { // // Each element consists of: // - // TableName - The table that consumed the provisioned throughput. + // TableName - The table that consumed the provisioned throughput. // - // CapacityUnits - The total number of capacity units consumed. + // CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*ConsumedCapacity `type:"list"` // A list of tables that were processed by BatchWriteItem and, for each table, @@ -1247,10 +1623,10 @@ type BatchWriteItemOutput struct { // // Each entry consists of the following subelements: // - // ItemCollectionKey - The partition key value of the item collection. This + // ItemCollectionKey - The partition key value of the item collection. This // is the same as the partition key value of the item. // - // SizeEstimateRange - An estimate of item collection size, expressed in + // SizeEstimateRange - An estimate of item collection size, expressed in // GB. This is a two-element array containing a lower bound and an upper bound // for the estimate. The estimate includes the size of all the items in the // table, plus the size of all attributes projected into all of the local secondary @@ -1269,17 +1645,17 @@ type BatchWriteItemOutput struct { // Each UnprocessedItems entry consists of a table name and, for that table, // a list of operations to perform (DeleteRequest or PutRequest). // - // DeleteRequest - Perform a DeleteItem operation on the specified item. + // DeleteRequest - Perform a DeleteItem operation on the specified item. // The item to be deleted is identified by a Key subelement: // - // Key - A map of primary key attribute values that uniquely identify the + // Key - A map of primary key attribute values that uniquely identify the // item. Each entry in this map consists of an attribute name and an attribute // value. // - // PutRequest - Perform a PutItem operation on the specified item. The + // PutRequest - Perform a PutItem operation on the specified item. The // item to be put is identified by an Item subelement: // - // Item - A map of attributes and their values. Each entry in this map consists + // Item - A map of attributes and their values. Each entry in this map consists // of an attribute name and an attribute value. Attribute values must not be // null; string and binary type attributes must have lengths greater than zero; // and set type attributes must not be empty. Requests that contain empty values @@ -1331,7 +1707,7 @@ func (s Capacity) GoString() string { // // EQ | LE | LT | GE | GT | BEGINS_WITH | BETWEEN // -// Condition is also used in a QueryFilter, which evaluates the query results +// Condition is also used in a QueryFilter, which evaluates the query results // and returns only the desired values. // // For a Scan operation, Condition is used in a ScanFilter, which evaluates @@ -1358,122 +1734,122 @@ type Condition struct { // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // The following are descriptions of each comparison operator. // - // EQ : Equal. EQ is supported for all datatypes, including lists and maps. 
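[Editor's aside, not part of the patch] A sketch of the BatchWriteItem request shape described above — one PutRequest and one DeleteRequest against a hypothetical Music table — together with the UnprocessedItems resubmission the output documentation implies:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// batchWrite assumes svc was constructed with dynamodb.New as in the earlier sketch.
func batchWrite(svc *dynamodb.DynamoDB) error {
	requests := map[string][]*dynamodb.WriteRequest{
		"Music": { // hypothetical table
			{PutRequest: &dynamodb.PutRequest{Item: map[string]*dynamodb.AttributeValue{
				"Artist":    {S: aws.String("No One You Know")},
				"SongTitle": {S: aws.String("Call Me Today")},
			}}},
			{DeleteRequest: &dynamodb.DeleteRequest{Key: map[string]*dynamodb.AttributeValue{
				"Artist":    {S: aws.String("The Acme Band")},
				"SongTitle": {S: aws.String("Look Out, World")},
			}}},
		},
	}
	// Resubmit whatever DynamoDB reports back as unprocessed.
	// (A real client would add exponential backoff between attempts.)
	for len(requests) > 0 {
		out, err := svc.BatchWriteItem(&dynamodb.BatchWriteItemInput{RequestItems: requests})
		if err != nil {
			return err
		}
		requests = out.UnprocessedItems
	}
	return nil
}
```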
+ // EQ : Equal. EQ is supported for all datatypes, including lists and maps. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // NE : Not equal. NE is supported for all datatypes, including lists and + // NE : Not equal. NE is supported for all datatypes, including lists and // maps. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). 
If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the existence of an attribute, not its data type. + // This operator tests for the existence of an attribute, not its data type. // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, // the result is a Boolean true. This result is because the attribute "a" exists; // its data type is not relevant to the NOT_NULL comparison operator. // - // NULL : The attribute does not exist. NULL is supported for all datatypes, + // NULL : The attribute does not exist. NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the nonexistence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NULL, - // the result is a Boolean false. This is because the attribute "a" exists; + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; // its data type is not relevant to the NULL comparison operator. // - // CONTAINS : Checks for a subsequence, or value in a set. + // CONTAINS : Checks for a subsequence, or value in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator looks - // for a subsequence of the target that matches the input. If the target attribute - // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates - // to true if it finds an exact match with any member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. 
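[Editor's aside, not part of the patch] The Condition structure whose comparison operators are re-wrapped here is the legacy filter element used in ScanFilter and QueryFilter maps. A minimal hypothetical sketch of the BEGINS_WITH case:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// scanByPrefix sketches a legacy ScanFilter entry built from a Condition.
// Table and attribute names are made up for illustration.
func scanByPrefix(svc *dynamodb.DynamoDB) (*dynamodb.ScanOutput, error) {
	return svc.Scan(&dynamodb.ScanInput{
		TableName: aws.String("Music"),
		ScanFilter: map[string]*dynamodb.Condition{
			"SongTitle": {
				ComparisonOperator: aws.String("BEGINS_WITH"),
				AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("Call")}},
			},
		},
	})
}
```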
// // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can // be a list; however, "b" cannot be a set, a map, or a list. // - // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value // in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is a String, then the operator checks for the absence of a substring match. - // If the target attribute of the comparison is Binary, then the operator checks - // for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then - // the operator evaluates to true if it does not find an exact match with any - // member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. // // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", // "a" can be a list; however, "b" cannot be a set, a map, or a list. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // IN : Checks for matching elements within two sets. + // IN : Checks for matching elements within two sets. // - // AttributeValueList can contain one or more AttributeValue elements of type + // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary (not a set type). These attributes are compared // against an existing set type attribute of an item. If any elements of the // input set are present in the item attribute, the expression evaluates to // true. // - // BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. // - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -1637,32 +2013,32 @@ type CreateTableInput struct { // One or more global secondary indexes (the maximum is five) to be created // on the table. Each global secondary index in the array includes the following: // - // IndexName - The name of the global secondary index. Must be unique only + // IndexName - The name of the global secondary index. 
Must be unique only // for this table. // - // KeySchema - Specifies the key schema for the global secondary index. + // KeySchema - Specifies the key schema for the global secondary index. // - // Projection - Specifies attributes that are copied (projected) from the + // Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute // specification is composed of: // - // ProjectionType - One of the following: + // ProjectionType - One of the following: // - // KEYS_ONLY - Only the index and primary keys are projected into the index. + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. // - // ALL - All of the table attributes are projected into the index. + // ALL - All of the table attributes are projected into the index. // - // NonKeyAttributes - A list of one or more non-key attribute names that + // NonKeyAttributes - A list of one or more non-key attribute names that // are projected into the secondary index. The total count of attributes provided // in NonKeyAttributes, summed across all of the secondary indexes, must not // exceed 20. If you project the same attribute into two different indexes, // this counts as two distinct attributes when determining the total. // - // ProvisionedThroughput - The provisioned throughput settings for the + // ProvisionedThroughput - The provisioned throughput settings for the // global secondary index, consisting of read and write capacity units. GlobalSecondaryIndexes []*GlobalSecondaryIndex `type:"list"` @@ -1673,15 +2049,15 @@ type CreateTableInput struct { // // Each KeySchemaElement in the array is composed of: // - // AttributeName - The name of this key attribute. + // AttributeName - The name of this key attribute. // - // KeyType - The role that the key attribute will assume: + // KeyType - The role that the key attribute will assume: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -1690,8 +2066,8 @@ type CreateTableInput struct { // attribute" derives from the way DynamoDB stores items with the same partition // key physically close together, in sorted order by the sort key value. // - // For a simple primary key (partition key), you must provide exactly one element - // with a KeyType of HASH. + // For a simple primary key (partition key), you must provide exactly one + // element with a KeyType of HASH. // // For a composite primary key (partition key and sort key), you must provide // exactly two elements, in this order: The first element must have a KeyType @@ -1708,27 +2084,27 @@ type CreateTableInput struct { // // Each local secondary index in the array includes the following: // - // IndexName - The name of the local secondary index. 
Must be unique only + // IndexName - The name of the local secondary index. Must be unique only // for this table. // - // KeySchema - Specifies the key schema for the local secondary index. The - // key schema must begin with the same partition key as the table. + // KeySchema - Specifies the key schema for the local secondary index. + // The key schema must begin with the same partition key as the table. // - // Projection - Specifies attributes that are copied (projected) from the + // Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute // specification is composed of: // - // ProjectionType - One of the following: + // ProjectionType - One of the following: // - // KEYS_ONLY - Only the index and primary keys are projected into the index. + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. // - // ALL - All of the table attributes are projected into the index. + // ALL - All of the table attributes are projected into the index. // - // NonKeyAttributes - A list of one or more non-key attribute names that + // NonKeyAttributes - A list of one or more non-key attribute names that // are projected into the secondary index. The total count of attributes provided // in NonKeyAttributes, summed across all of the secondary indexes, must not // exceed 20. If you project the same attribute into two different indexes, @@ -1745,24 +2121,24 @@ type CreateTableInput struct { // The settings for DynamoDB Streams on the table. These settings consist of: // - // StreamEnabled - Indicates whether Streams is to be enabled (true) or disabled - // (false). + // StreamEnabled - Indicates whether Streams is to be enabled (true) or + // disabled (false). // - // StreamViewType - When an item in the table is modified, StreamViewType + // StreamViewType - When an item in the table is modified, StreamViewType // determines what information is written to the table's stream. Valid values // for StreamViewType are: // - // KEYS_ONLY - Only the key attributes of the modified item are written to - // the stream. - // - // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // KEYS_ONLY - Only the key attributes of the modified item are written // to the stream. // - // OLD_IMAGE - The entire item, as it appeared before it was modified, is written - // to the stream. - // - // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // NEW_IMAGE - The entire item, as it appears after it was modified, is // written to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is + // written to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. StreamSpecification *StreamSpecification `type:"structure"` // The name of the table to create. @@ -1918,7 +2294,8 @@ type DeleteItemInput struct { // // These function names are case-sensitive. 
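[Editor's aside, not part of the patch] The CreateTableInput fields documented above (AttributeDefinitions, KeySchema, GlobalSecondaryIndexes, StreamSpecification) fit together roughly as follows; every name and capacity value here is illustrative:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// createMusicTable sketches a table with a composite primary key, one global
// secondary index, and a stream. Hypothetical names and throughput values.
func createMusicTable(svc *dynamodb.DynamoDB) error {
	_, err := svc.CreateTable(&dynamodb.CreateTableInput{
		TableName: aws.String("Music"),
		AttributeDefinitions: []*dynamodb.AttributeDefinition{
			{AttributeName: aws.String("Artist"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("SongTitle"), AttributeType: aws.String("S")},
			{AttributeName: aws.String("AlbumTitle"), AttributeType: aws.String("S")},
		},
		KeySchema: []*dynamodb.KeySchemaElement{
			{AttributeName: aws.String("Artist"), KeyType: aws.String("HASH")},     // partition key
			{AttributeName: aws.String("SongTitle"), KeyType: aws.String("RANGE")}, // sort key
		},
		GlobalSecondaryIndexes: []*dynamodb.GlobalSecondaryIndex{{
			IndexName: aws.String("AlbumIndex"),
			KeySchema: []*dynamodb.KeySchemaElement{
				{AttributeName: aws.String("AlbumTitle"), KeyType: aws.String("HASH")},
			},
			Projection:            &dynamodb.Projection{ProjectionType: aws.String("KEYS_ONLY")},
			ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ReadCapacityUnits: aws.Int64(5), WriteCapacityUnits: aws.Int64(5)},
		}},
		ProvisionedThroughput: &dynamodb.ProvisionedThroughput{ReadCapacityUnits: aws.Int64(10), WriteCapacityUnits: aws.Int64(5)},
		StreamSpecification: &dynamodb.StreamSpecification{
			StreamEnabled:  aws.Bool(true),
			StreamViewType: aws.String("NEW_AND_OLD_IMAGES"),
		},
	})
	return err
}
```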
// - // Comparison operators: = | | | | = | = | BETWEEN | IN + // Comparison operators: = | <> | < | > | <= | + // >= | BETWEEN | IN // // Logical operators: AND | OR | NOT // @@ -1926,7 +2303,7 @@ type DeleteItemInput struct { // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. // - // ConditionExpression replaces the legacy ConditionalOperator and Expected + // ConditionExpression replaces the legacy ConditionalOperator and Expected // parameters. ConditionExpression *string `type:"string"` @@ -1937,17 +2314,17 @@ type DeleteItemInput struct { // // A logical operator to apply to the conditions in the Expected map: // - // AND - If all of the conditions evaluate to true, then the entire map evaluates - // to true. + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. // - // OR - If at least one of the conditions evaluate to true, then the entire + // OR - If at least one of the conditions evaluate to true, then the entire // map evaluates to true. // - // If you omit ConditionalOperator, then AND is the default. + // If you omit ConditionalOperator, then AND is the default. // // The operation will succeed only if the entire map evaluates to true. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` // This is a legacy parameter, for backward compatibility. New applications @@ -1972,9 +2349,9 @@ type DeleteItemInput struct { // If the Expected map evaluates to true, then the conditional operation succeeds; // otherwise, it fails. // - // Expected contains the following: + // Expected contains the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the ComparisonOperator // being used. // @@ -1982,132 +2359,133 @@ type DeleteItemInput struct { // // String value comparisons for greater than, equals, or less than are based // on ASCII character code values. For example, a is greater than A, and a is - // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). // // For type Binary, DynamoDB treats each byte of the binary data as unsigned // when it compares binary values. // - // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. // When performing the comparison, DynamoDB uses strongly consistent reads. // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // The following are descriptions of each comparison operator. // - // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. 
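[Editor's aside, not part of the patch] A sketch of the ConditionExpression / expression-attribute mechanism documented in this block, using the #name and :value placeholder styles described above; the table, attribute, and threshold are hypothetical:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// conditionalDelete sketches DeleteItem guarded by a ConditionExpression.
func conditionalDelete(svc *dynamodb.DynamoDB) (*dynamodb.DeleteItemOutput, error) {
	return svc.DeleteItem(&dynamodb.DeleteItemInput{
		TableName: aws.String("Music"),
		Key: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Call Me Today")},
		},
		// Delete only while the (reserved-word) attribute is still below a threshold.
		ConditionExpression:      aws.String("#P <= :max"),
		ExpressionAttributeNames: map[string]*string{"#P": aws.String("Percentile")},
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":max": {N: aws.String("50")},
		},
		ReturnValues: aws.String("ALL_OLD"), // DeleteItem accepts only NONE or ALL_OLD
	})
}
```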
// - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // NE : Not equal. NE is supported for all datatypes, including lists and + // NE : Not equal. NE is supported for all datatypes, including lists and // maps. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. 
For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the existence of an attribute, not its data type. + // This operator tests for the existence of an attribute, not its data type. // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, // the result is a Boolean true. This result is because the attribute "a" exists; // its data type is not relevant to the NOT_NULL comparison operator. // - // NULL : The attribute does not exist. NULL is supported for all datatypes, + // NULL : The attribute does not exist. NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the nonexistence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NULL, - // the result is a Boolean false. This is because the attribute "a" exists; + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; // its data type is not relevant to the NULL comparison operator. // - // CONTAINS : Checks for a subsequence, or value in a set. + // CONTAINS : Checks for a subsequence, or value in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator looks - // for a subsequence of the target that matches the input. If the target attribute - // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates - // to true if it finds an exact match with any member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. // // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can // be a list; however, "b" cannot be a set, a map, or a list. 
// - // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value // in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is a String, then the operator checks for the absence of a substring match. - // If the target attribute of the comparison is Binary, then the operator checks - // for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then - // the operator evaluates to true if it does not find an exact match with any - // member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. // // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", // "a" can be a list; however, "b" cannot be a set, a map, or a list. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // IN : Checks for matching elements within two sets. + // IN : Checks for matching elements within two sets. // - // AttributeValueList can contain one or more AttributeValue elements of type + // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary (not a set type). These attributes are compared // against an existing set type attribute of an item. If any elements of the // input set are present in the item attribute, the expression evaluates to // true. // - // BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. // - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -2122,27 +2500,27 @@ type DeleteItemInput struct { // For backward compatibility with previous DynamoDB releases, the following // parameters can be used instead of AttributeValueList and ComparisonOperator: // - // Value - A value for DynamoDB to compare with an attribute. + // Value - A value for DynamoDB to compare with an attribute. 
// - // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // Exists - A Boolean value that causes DynamoDB to evaluate the value before // attempting the conditional operation: // // If Exists is true, DynamoDB will check to see if that attribute value // already exists in the table. If it is found, then the condition evaluates // to true; otherwise the condition evaluate to false. // - // If Exists is false, DynamoDB assumes that the attribute value does not + // If Exists is false, DynamoDB assumes that the attribute value does not // exist in the table. If in fact the value does not exist, then the assumption // is valid and the condition evaluates to true. If the value is found, despite // the assumption that it does not exist, the condition evaluates to false. // - // Note that the default value for Exists is true. + // Note that the default value for Exists is true. // // The Value and Exists parameters are incompatible with AttributeValueList // and ComparisonOperator. Note that if you use both sets of parameters at once, // DynamoDB will return a ValidationException exception. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. Expected map[string]*ExpectedAttributeValue `type:"map"` // One or more substitution tokens for attribute names in an expression. The @@ -2159,25 +2537,25 @@ type DeleteItemInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -2187,16 +2565,16 @@ type DeleteItemInput struct { // value. 
For example, suppose that you wanted to check whether the value of // the ProductStatus attribute was one of the following: // - // Available | Backordered | Discontinued + // Available | Backordered | Discontinued // // You would first need to specify ExpressionAttributeValues as follows: // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} // } // // You could then use these values in an expression, such as this: // - // ProductStatus IN (:avail, :back, :disc) + // ProductStatus IN (:avail, :back, :disc) // // For more information on expression attribute values, see Specifying Conditions // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) @@ -2215,7 +2593,7 @@ type DeleteItemInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -2223,10 +2601,10 @@ type DeleteItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // Determines whether item collection metrics are returned. If set to SIZE, @@ -2238,10 +2616,13 @@ type DeleteItemInput struct { // Use ReturnValues if you want to get the item attributes as they appeared // before they were deleted. For DeleteItem, the valid values are: // - // NONE - If ReturnValues is not specified, or if its value is NONE, then + // NONE - If ReturnValues is not specified, or if its value is NONE, then // nothing is returned. (This setting is the default for ReturnValues.) // - // ALL_OLD - The content of the old item is returned. + // ALL_OLD - The content of the old item is returned. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD. ReturnValues *string `type:"string" enum:"ReturnValue"` // The name of the table from which to delete the item. @@ -2301,11 +2682,11 @@ type DeleteItemOutput struct { // // Each ItemCollectionMetrics element consists of: // - // ItemCollectionKey - The partition key value of the item collection. This + // ItemCollectionKey - The partition key value of the item collection. This // is the same as the partition key value of the item itself. // - // SizeEstimateRange - An estimate of item collection size, in gigabytes. This - // value is a two-element array containing a lower bound and an upper bound + // SizeEstimateRange - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper bound // for the estimate. 
The estimate includes the size of all the items in the // table, plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary @@ -2515,7 +2896,7 @@ func (s DescribeTableOutput) GoString() string { // In this case, the conditional operation succeeds only if the comparison evaluates // to false. // -// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. +// Value and Exists are incompatible with AttributeValueList and ComparisonOperator. // Note that if you use both sets of parameters at once, DynamoDB will return // a ValidationException exception. type ExpectedAttributeValue struct { @@ -2543,122 +2924,122 @@ type ExpectedAttributeValue struct { // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // The following are descriptions of each comparison operator. // - // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // NE : Not equal. NE is supported for all datatypes, including lists and + // NE : Not equal. NE is supported for all datatypes, including lists and // maps. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. 
+ // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the existence of an attribute, not its data type. + // This operator tests for the existence of an attribute, not its data type. // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, // the result is a Boolean true. This result is because the attribute "a" exists; // its data type is not relevant to the NOT_NULL comparison operator. // - // NULL : The attribute does not exist. NULL is supported for all datatypes, + // NULL : The attribute does not exist. NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the nonexistence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NULL, - // the result is a Boolean false. This is because the attribute "a" exists; + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; // its data type is not relevant to the NULL comparison operator. // - // CONTAINS : Checks for a subsequence, or value in a set. + // CONTAINS : Checks for a subsequence, or value in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). 
If the target attribute of the comparison - // is of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator looks - // for a subsequence of the target that matches the input. If the target attribute - // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates - // to true if it finds an exact match with any member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. // // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can // be a list; however, "b" cannot be a set, a map, or a list. // - // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value // in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is a String, then the operator checks for the absence of a substring match. - // If the target attribute of the comparison is Binary, then the operator checks - // for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then - // the operator evaluates to true if it does not find an exact match with any - // member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. // // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", // "a" can be a list; however, "b" cannot be a set, a map, or a list. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // IN : Checks for matching elements within two sets. + // IN : Checks for matching elements within two sets. // - // AttributeValueList can contain one or more AttributeValue elements of type + // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary (not a set type). These attributes are compared // against an existing set type attribute of an item. 
If any elements of the // input set are present in the item attribute, the expression evaluates to // true. // - // BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. // - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -2684,10 +3065,10 @@ type ExpectedAttributeValue struct { // // DynamoDB returns a ValidationException if: // - // Exists is true but there is no Value to check. (You expect a value to + // Exists is true but there is no Value to check. (You expect a value to // exist, but don't specify what that value is.) // - // Exists is false but you also provide a Value. (You cannot expect an attribute + // Exists is false but you also provide a Value. (You cannot expect an attribute // to have a value, while also expecting it not to exist.) Exists *bool `type:"boolean"` @@ -2723,9 +3104,9 @@ type GetItemInput struct { // This parameter allows you to retrieve attributes of type List or Map; however, // it cannot retrieve individual elements within a List or a Map. // - // The names of one or more attributes to retrieve. If no attribute names are - // provided, then all attributes will be returned. If any of the requested attributes - // are not found, they will not appear in the result. + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. // // Note that AttributesToGet has no effect on provisioned throughput consumption. // DynamoDB determines capacity units consumed based on item size, not on the @@ -2751,25 +3132,25 @@ type GetItemInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. 
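[Editor's aside, not part of the patch] The Exists shorthand documented just above is the classic "create only if absent" guard; a hypothetical sketch with PutItem:

```go
import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// putIfAbsent sketches the legacy Exists=false guard: the write succeeds only
// when no item with this key is already present. Names are illustrative.
func putIfAbsent(svc *dynamodb.DynamoDB) error {
	_, err := svc.PutItem(&dynamodb.PutItemInput{
		TableName: aws.String("Music"),
		Item: map[string]*dynamodb.AttributeValue{
			"Artist":     {S: aws.String("No One You Know")},
			"SongTitle":  {S: aws.String("Call Me Today")},
			"AlbumTitle": {S: aws.String("Somewhat Famous")},
		},
		Expected: map[string]*dynamodb.ExpectedAttributeValue{
			// Exists=false with no Value: fail if the attribute already exists.
			"Artist": {Exists: aws.Bool(false)},
		},
	})
	return err
}
```

If the item is already there, the call returns a ConditionalCheckFailedException rather than overwriting it.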
// - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -2793,13 +3174,13 @@ type GetItemInput struct { // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // ProjectionExpression replaces the legacy AttributesToGet parameter. + // ProjectionExpression replaces the legacy AttributesToGet parameter. ProjectionExpression *string `type:"string"` // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -2807,10 +3188,10 @@ type GetItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // The name of the table containing the requested item. @@ -2886,11 +3267,11 @@ type GlobalSecondaryIndex struct { // The complete key schema for a global secondary index, which consists of one // or more pairs of attribute names and key types: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -2983,7 +3364,7 @@ type GlobalSecondaryIndexDescription struct { // DynamoDB will do so. After all items have been processed, the backfilling // operation is complete and Backfilling is false. // - // For indexes that were created during a CreateTable operation, the Backfilling + // For indexes that were created during a CreateTable operation, the Backfilling // attribute does not appear in the DescribeTable output. Backfilling *bool `type:"boolean"` @@ -3000,13 +3381,13 @@ type GlobalSecondaryIndexDescription struct { // The current state of the global secondary index: // - // CREATING - The index is being created. + // CREATING - The index is being created. // - // UPDATING - The index is being updated. + // UPDATING - The index is being updated. // - // DELETING - The index is being deleted. + // DELETING - The index is being deleted. // - // ACTIVE - The index is ready for use. + // ACTIVE - The index is ready for use. 
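[Editor's aside, not part of the patch] The GetItemInput fields covered in this block (Key, ProjectionExpression, ExpressionAttributeNames, ConsistentRead, ReturnConsumedCapacity) come together roughly as follows; the table and attributes are hypothetical:

```go
import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// getSong sketches GetItem with a projection that aliases the reserved word
// "Year" through ExpressionAttributeNames.
func getSong(svc *dynamodb.DynamoDB) error {
	out, err := svc.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String("Music"),
		Key: map[string]*dynamodb.AttributeValue{
			"Artist":    {S: aws.String("No One You Know")},
			"SongTitle": {S: aws.String("Call Me Today")},
		},
		ProjectionExpression:     aws.String("AlbumTitle, #Y"),
		ExpressionAttributeNames: map[string]*string{"#Y": aws.String("Year")},
		ConsistentRead:           aws.Bool(true),
		ReturnConsumedCapacity:   aws.String("TOTAL"),
	})
	if err != nil {
		return err
	}
	fmt.Println("attributes returned:", len(out.Item))
	return nil
}
```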
IndexStatus *string `type:"string" enum:"IndexStatus"` // The number of items in the specified index. DynamoDB updates this value approximately @@ -3016,11 +3397,11 @@ type GlobalSecondaryIndexDescription struct { // The complete key schema for a global secondary index, which consists of one // or more pairs of attribute names and key types: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -3052,26 +3433,27 @@ func (s GlobalSecondaryIndexDescription) GoString() string { // Represents one of the following: // -// A new global secondary index to be added to an existing table. +// A new global secondary index to be added to an existing table. // -// New provisioned throughput parameters for an existing global secondary index. +// New provisioned throughput parameters for an existing global secondary +// index. // -// An existing global secondary index to be removed from an existing table. +// An existing global secondary index to be removed from an existing table. type GlobalSecondaryIndexUpdate struct { _ struct{} `type:"structure"` // The parameters required for creating a global secondary index on an existing // table: // - // IndexName + // IndexName // - // KeySchema + // KeySchema // - // AttributeDefinitions + // AttributeDefinitions // - // Projection + // Projection // - // ProvisionedThroughput + // ProvisionedThroughput Create *CreateGlobalSecondaryIndexAction `type:"structure"` // The name of an existing global secondary index to be removed. @@ -3169,11 +3551,11 @@ type KeySchemaElement struct { // The role that this key attribute will assume: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -3246,25 +3628,25 @@ type KeysAndAttributes struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). 
To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -3283,7 +3665,7 @@ type KeysAndAttributes struct { // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // ProjectionExpression replaces the legacy AttributesToGet parameter. + // ProjectionExpression replaces the legacy AttributesToGet parameter. ProjectionExpression *string `type:"string"` } @@ -3398,11 +3780,11 @@ type LocalSecondaryIndex struct { // The complete key schema for the local secondary index, consisting of one // or more pairs of attribute names and key types: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -3490,11 +3872,11 @@ type LocalSecondaryIndexDescription struct { // The complete key schema for the local secondary index, consisting of one // or more pairs of attribute names and key types: // - // HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -3536,12 +3918,12 @@ type Projection struct { // The set of attributes that are projected into the index: // - // KEYS_ONLY - Only the index and primary keys are projected into the index. + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. // - // ALL - All of the table attributes are projected into the index. + // ALL - All of the table attributes are projected into the index. 
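To illustrate the Create branch of GlobalSecondaryIndexUpdate described above, here is a rough sketch of an UpdateTable call that adds a new global secondary index; the table, attribute, and index names are made up for the example:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    _, err := svc.UpdateTable(&dynamodb.UpdateTableInput{
        TableName: aws.String("Music"), // hypothetical table
        // Any attribute used in the new index key schema must be defined here.
        AttributeDefinitions: []*dynamodb.AttributeDefinition{
            {AttributeName: aws.String("Genre"), AttributeType: aws.String("S")},
        },
        GlobalSecondaryIndexUpdates: []*dynamodb.GlobalSecondaryIndexUpdate{
            {
                Create: &dynamodb.CreateGlobalSecondaryIndexAction{
                    IndexName: aws.String("GenreIndex"),
                    KeySchema: []*dynamodb.KeySchemaElement{
                        {AttributeName: aws.String("Genre"), KeyType: aws.String("HASH")},
                    },
                    Projection: &dynamodb.Projection{
                        ProjectionType: aws.String("ALL"),
                    },
                    ProvisionedThroughput: &dynamodb.ProvisionedThroughput{
                        ReadCapacityUnits:  aws.Int64(5),
                        WriteCapacityUnits: aws.Int64(5),
                    },
                },
            },
        },
    })
    if err != nil {
        log.Fatal(err)
    }
}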
ProjectionType *string `type:"string" enum:"ProjectionType"` } @@ -3674,7 +4056,8 @@ type PutItemInput struct { // // These function names are case-sensitive. // - // Comparison operators: = | | | | = | = | BETWEEN | IN + // Comparison operators: = | <> | < | > | <= | + // >= | BETWEEN | IN // // Logical operators: AND | OR | NOT // @@ -3682,7 +4065,7 @@ type PutItemInput struct { // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. // - // ConditionExpression replaces the legacy ConditionalOperator and Expected + // ConditionExpression replaces the legacy ConditionalOperator and Expected // parameters. ConditionExpression *string `type:"string"` @@ -3693,17 +4076,17 @@ type PutItemInput struct { // // A logical operator to apply to the conditions in the Expected map: // - // AND - If all of the conditions evaluate to true, then the entire map evaluates - // to true. + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. // - // OR - If at least one of the conditions evaluate to true, then the entire + // OR - If at least one of the conditions evaluate to true, then the entire // map evaluates to true. // - // If you omit ConditionalOperator, then AND is the default. + // If you omit ConditionalOperator, then AND is the default. // // The operation will succeed only if the entire map evaluates to true. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` // This is a legacy parameter, for backward compatibility. New applications @@ -3714,9 +4097,9 @@ type PutItemInput struct { // A map of attribute/condition pairs. Expected provides a conditional block // for the PutItem operation. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. // - // Each element of Expected consists of an attribute name, a comparison operator, + // Each element of Expected consists of an attribute name, a comparison operator, // and one or more values. DynamoDB compares the attribute with the value(s) // you supplied, using the comparison operator. For each Expected element, the // result of the evaluation is either true or false. @@ -3730,9 +4113,9 @@ type PutItemInput struct { // If the Expected map evaluates to true, then the conditional operation succeeds; // otherwise, it fails. // - // Expected contains the following: + // Expected contains the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the ComparisonOperator // being used. // @@ -3740,132 +4123,133 @@ type PutItemInput struct { // // String value comparisons for greater than, equals, or less than are based // on ASCII character code values. For example, a is greater than A, and a is - // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). // // For type Binary, DynamoDB treats each byte of the binary data as unsigned // when it compares binary values. 
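The ConditionExpression form documented above can be sketched with a conditional PutItem; the table, attribute names, and the limit value are hypothetical:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    // Write the item only if it does not exist yet, or if its current
    // Price is less than or equal to :limit.
    _, err := svc.PutItem(&dynamodb.PutItemInput{
        TableName: aws.String("ProductCatalog"), // hypothetical table
        Item: map[string]*dynamodb.AttributeValue{
            "Id":    {N: aws.String("456")},
            "Price": {N: aws.String("199")},
        },
        ConditionExpression: aws.String("attribute_not_exists(Id) OR Price <= :limit"),
        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
            ":limit": {N: aws.String("200")},
        },
    })
    if err != nil {
        log.Fatal(err) // a ConditionalCheckFailedException means the condition was false
    }
}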
// - // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. // When performing the comparison, DynamoDB uses strongly consistent reads. // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // The following are descriptions of each comparison operator. // - // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // NE : Not equal. NE is supported for all datatypes, including lists and + // NE : Not equal. NE is supported for all datatypes, including lists and // maps. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). 
If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the existence of an attribute, not its data type. + // This operator tests for the existence of an attribute, not its data type. // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, // the result is a Boolean true. This result is because the attribute "a" exists; // its data type is not relevant to the NOT_NULL comparison operator. // - // NULL : The attribute does not exist. NULL is supported for all datatypes, + // NULL : The attribute does not exist. NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the nonexistence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NULL, - // the result is a Boolean false. This is because the attribute "a" exists; + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; // its data type is not relevant to the NULL comparison operator. // - // CONTAINS : Checks for a subsequence, or value in a set. + // CONTAINS : Checks for a subsequence, or value in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator looks - // for a subsequence of the target that matches the input. If the target attribute - // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates - // to true if it finds an exact match with any member of the set. 
+ // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. // // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can // be a list; however, "b" cannot be a set, a map, or a list. // - // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value // in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is a String, then the operator checks for the absence of a substring match. - // If the target attribute of the comparison is Binary, then the operator checks - // for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then - // the operator evaluates to true if it does not find an exact match with any - // member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. // // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", // "a" can be a list; however, "b" cannot be a set, a map, or a list. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // IN : Checks for matching elements within two sets. + // IN : Checks for matching elements within two sets. // - // AttributeValueList can contain one or more AttributeValue elements of type + // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary (not a set type). These attributes are compared // against an existing set type attribute of an item. If any elements of the // input set are present in the item attribute, the expression evaluates to // true. // - // BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. 
// - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -3880,21 +4264,21 @@ type PutItemInput struct { // For backward compatibility with previous DynamoDB releases, the following // parameters can be used instead of AttributeValueList and ComparisonOperator: // - // Value - A value for DynamoDB to compare with an attribute. + // Value - A value for DynamoDB to compare with an attribute. // - // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // Exists - A Boolean value that causes DynamoDB to evaluate the value before // attempting the conditional operation: // // If Exists is true, DynamoDB will check to see if that attribute value // already exists in the table. If it is found, then the condition evaluates // to true; otherwise the condition evaluate to false. // - // If Exists is false, DynamoDB assumes that the attribute value does not + // If Exists is false, DynamoDB assumes that the attribute value does not // exist in the table. If in fact the value does not exist, then the assumption // is valid and the condition evaluates to true. If the value is found, despite // the assumption that it does not exist, the condition evaluates to false. // - // Note that the default value for Exists is true. + // Note that the default value for Exists is true. // // The Value and Exists parameters are incompatible with AttributeValueList // and ComparisonOperator. Note that if you use both sets of parameters at once, @@ -3915,25 +4299,25 @@ type PutItemInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -3943,16 +4327,16 @@ type PutItemInput struct { // value. 
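For completeness, a sketch of the legacy Expected form described above, expressing the same kind of check with ComparisonOperator and AttributeValueList; new code should prefer ConditionExpression, and all names and values here are hypothetical:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    _, err := svc.PutItem(&dynamodb.PutItemInput{
        TableName: aws.String("ProductCatalog"), // hypothetical table
        Item: map[string]*dynamodb.AttributeValue{
            "Id":    {N: aws.String("456")},
            "Price": {N: aws.String("199")},
        },
        // Succeed only if the existing Price is <= 200 AND the existing
        // ProductStatus is not "Discontinued".
        Expected: map[string]*dynamodb.ExpectedAttributeValue{
            "Price": {
                ComparisonOperator: aws.String("LE"),
                AttributeValueList: []*dynamodb.AttributeValue{{N: aws.String("200")}},
            },
            "ProductStatus": {
                ComparisonOperator: aws.String("NE"),
                AttributeValueList: []*dynamodb.AttributeValue{{S: aws.String("Discontinued")}},
            },
        },
        ConditionalOperator: aws.String("AND"), // the default when omitted
    })
    if err != nil {
        log.Fatal(err)
    }
}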
For example, suppose that you wanted to check whether the value of // the ProductStatus attribute was one of the following: // - // Available | Backordered | Discontinued + // Available | Backordered | Discontinued // // You would first need to specify ExpressionAttributeValues as follows: // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} // } // // You could then use these values in an expression, such as this: // - // ProductStatus IN (:avail, :back, :disc) + // ProductStatus IN (:avail, :back, :disc) // // For more information on expression attribute values, see Specifying Conditions // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) @@ -3981,7 +4365,7 @@ type PutItemInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -3989,10 +4373,10 @@ type PutItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // Determines whether item collection metrics are returned. If set to SIZE, @@ -4005,11 +4389,14 @@ type PutItemInput struct { // before they were updated with the PutItem request. For PutItem, the valid // values are: // - // NONE - If ReturnValues is not specified, or if its value is NONE, then + // NONE - If ReturnValues is not specified, or if its value is NONE, then // nothing is returned. (This setting is the default for ReturnValues.) // - // ALL_OLD - If PutItem overwrote an attribute name-value pair, then the + // ALL_OLD - If PutItem overwrote an attribute name-value pair, then the // content of the old item is returned. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // PutItem does not recognize any values other than NONE or ALL_OLD. ReturnValues *string `type:"string" enum:"ReturnValue"` // The name of the table to contain the item. @@ -4069,11 +4456,11 @@ type PutItemOutput struct { // // Each ItemCollectionMetrics element consists of: // - // ItemCollectionKey - The partition key value of the item collection. This + // ItemCollectionKey - The partition key value of the item collection. This // is the same as the partition key value of the item itself. // - // SizeEstimateRange - An estimate of item collection size, in gigabytes. This - // value is a two-element array containing a lower bound and an upper bound + // SizeEstimateRange - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper bound // for the estimate. 
The estimate includes the size of all the items in the // table, plus the size of all attributes projected into all of the local secondary // indexes on that table. Use this estimate to measure whether a local secondary @@ -4128,9 +4515,9 @@ type QueryInput struct { // This parameter allows you to retrieve attributes of type List or Map; however, // it cannot retrieve individual elements within a List or a Map. // - // The names of one or more attributes to retrieve. If no attribute names are - // provided, then all attributes will be returned. If any of the requested attributes - // are not found, they will not appear in the result. + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. // // Note that AttributesToGet has no effect on provisioned throughput consumption. // DynamoDB determines capacity units consumed based on item size, not on the @@ -4159,17 +4546,17 @@ type QueryInput struct { // // A logical operator to apply to the conditions in a QueryFilter map: // - // AND - If all of the conditions evaluate to true, then the entire map evaluates - // to true. + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. // - // OR - If at least one of the conditions evaluate to true, then the entire + // OR - If at least one of the conditions evaluate to true, then the entire // map evaluates to true. // - // If you omit ConditionalOperator, then AND is the default. + // If you omit ConditionalOperator, then AND is the default. // // The operation will succeed only if the entire map evaluates to true. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` // Determines the read consistency model: If set to true, then the operation @@ -4202,25 +4589,25 @@ type QueryInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. 
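Tying together the ProductStatus example and the ReturnValues discussion from the PutItemInput documentation above, a rough sketch with a hypothetical table and item:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    out, err := svc.PutItem(&dynamodb.PutItemInput{
        TableName: aws.String("ProductCatalog"), // hypothetical table
        Item: map[string]*dynamodb.AttributeValue{
            "Id":            {N: aws.String("456")},
            "ProductStatus": {S: aws.String("Available")},
        },
        // Overwrite only if the existing status is one of the three values.
        ConditionExpression: aws.String("ProductStatus IN (:avail, :back, :disc)"),
        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
            ":avail": {S: aws.String("Available")},
            ":back":  {S: aws.String("Backordered")},
            ":disc":  {S: aws.String("Discontinued")},
        },
        // ALL_OLD returns the previous content of an overwritten item.
        ReturnValues: aws.String("ALL_OLD"),
    })
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(out.Attributes) // the old item, if one was replaced
}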
// - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -4230,16 +4617,16 @@ type QueryInput struct { // value. For example, suppose that you wanted to check whether the value of // the ProductStatus attribute was one of the following: // - // Available | Backordered | Discontinued + // Available | Backordered | Discontinued // // You would first need to specify ExpressionAttributeValues as follows: // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} // } // // You could then use these values in an expression, such as this: // - // ProductStatus IN (:avail, :back, :disc) + // ProductStatus IN (:avail, :back, :disc) // // For more information on expression attribute values, see Specifying Conditions // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) @@ -4256,7 +4643,7 @@ type QueryInput struct { // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) // in the Amazon DynamoDB Developer Guide. // - // FilterExpression replaces the legacy QueryFilter and ConditionalOperator + // FilterExpression replaces the legacy QueryFilter and ConditionalOperator // parameters. FilterExpression *string `type:"string"` @@ -4287,26 +4674,26 @@ type QueryInput struct { // // Valid comparisons for the sort key condition are as follows: // - // sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval. + // sortKeyName = :sortkeyval - true if the sort key value is equal to :sortkeyval. // - // sortKeyName :sortkeyval - true if the sort key value is less than :sortkeyval. + // sortKeyName < :sortkeyval - true if the sort key value is less than :sortkeyval. // - // sortKeyName = :sortkeyval - true if the sort key value is less than or - // equal to :sortkeyval. - // - // sortKeyName :sortkeyval - true if the sort key value is greater than - // :sortkeyval. - // - // sortKeyName = :sortkeyval - true if the sort key value is greater than + // sortKeyName <= :sortkeyval - true if the sort key value is less than // or equal to :sortkeyval. // - // sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort key - // value is greater than or equal to :sortkeyval1, and less than or equal to - // :sortkeyval2. + // sortKeyName > :sortkeyval - true if the sort key value is greater than + // :sortkeyval. // - // begins_with (sortKeyName, :sortkeyval) - true if the sort key value begins - // with a particular operand. (You cannot use this function with a sort key - // that is of type Number.) Note that the function name begins_with is case-sensitive. + // sortKeyName >= :sortkeyval - true if the sort key value is greater than + // or equal to :sortkeyval. + // + // sortKeyName BETWEEN :sortkeyval1 AND :sortkeyval2 - true if the sort + // key value is greater than or equal to :sortkeyval1, and less than or equal + // to :sortkeyval2. 
+ // + // begins_with ( sortKeyName, :sortkeyval ) - true if the sort key value + // begins with a particular operand. (You cannot use this function with a sort + // key that is of type Number.) Note that the function name begins_with is case-sensitive. // // Use the ExpressionAttributeValues parameter to replace tokens such as // :partitionval and :sortval with actual values at runtime. @@ -4317,17 +4704,21 @@ type QueryInput struct { // reserved word. For example, the following KeyConditionExpression parameter // causes an error because Size is a reserved word: // - // Size = :myval To work around this, define a placeholder (such a #S) - // to represent the attribute name Size. KeyConditionExpression then is as follows: + // Size = :myval // - // #S = :myval For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // To work around this, define a placeholder (such a #S) to represent the + // attribute name Size. KeyConditionExpression then is as follows: + // + // #S = :myval + // + // For a list of reserved words, see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide. // // For more information on ExpressionAttributeNames and ExpressionAttributeValues, // see Using Placeholders for Attribute Names and Values (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ExpressionPlaceholders.html) // in the Amazon DynamoDB Developer Guide. // - // KeyConditionExpression replaces the legacy KeyConditions parameter. + // KeyConditionExpression replaces the legacy KeyConditions parameter. KeyConditionExpression *string `type:"string"` // This is a legacy parameter, for backward compatibility. New applications @@ -4344,15 +4735,15 @@ type QueryInput struct { // the partition key will be retrieved. If a FilterExpression or QueryFilter // is present, it will be applied after the items are retrieved. // - // For a query on an index, you can have conditions only on the index key attributes. - // You must provide the index partition key name and value as an EQ condition. - // You can optionally provide a second condition, referring to the index sort - // key. + // For a query on an index, you can have conditions only on the index key + // attributes. You must provide the index partition key name and value as an + // EQ condition. You can optionally provide a second condition, referring to + // the index sort key. // // Each KeyConditions element consists of an attribute name to compare, along // with the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the ComparisonOperator // being used. // @@ -4366,7 +4757,7 @@ type QueryInput struct { // For Binary, DynamoDB treats each byte of the binary data as unsigned when // it compares binary values. // - // ComparisonOperator - A comparator for evaluating attributes, for example, + // ComparisonOperator - A comparator for evaluating attributes, for example, // equals, greater than, less than, and so on. // // For KeyConditions, only the following comparison operators are supported: @@ -4375,56 +4766,56 @@ type QueryInput struct { // // The following are descriptions of these comparison operators. // - // EQ : Equal. + // EQ : Equal. 
// - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one specified in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // AttributeValueList can contain only one AttributeValue of type String, + // Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one specified in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). 
The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // BETWEEN : Greater than or equal to the first value, and less than or + // BETWEEN : Greater than or equal to the first value, and less than or // equal to the second value. // - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -4445,7 +4836,8 @@ type QueryInput struct { // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation // and returns the matching values up to the limit, and a key in LastEvaluatedKey // to apply in a subsequent operation to continue the operation. For more information, - // see Query and Scan in the Amazon DynamoDB Developer Guide. + // see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. Limit *int64 `min:"1" type:"integer"` // A string that identifies one or more attributes to retrieve from the table. @@ -4459,7 +4851,7 @@ type QueryInput struct { // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // ProjectionExpression replaces the legacy AttributesToGet parameter. + // ProjectionExpression replaces the legacy AttributesToGet parameter. ProjectionExpression *string `type:"string"` // This is a legacy parameter, for backward compatibility. New applications @@ -4472,13 +4864,13 @@ type QueryInput struct { // // This parameter does not support attributes of type List or Map. // - // A QueryFilter is applied after the items have already been read; the process + // A QueryFilter is applied after the items have already been read; the process // of filtering does not consume any additional read capacity units. // - // If you provide more than one condition in the QueryFilter map, then by default - // all of the conditions must evaluate to true. In other words, the conditions - // are ANDed together. (You can use the ConditionalOperator parameter to OR - // the conditions instead. If you do this, then at least one of the conditions + // If you provide more than one condition in the QueryFilter map, then by + // default all of the conditions must evaluate to true. In other words, the + // conditions are ANDed together. (You can use the ConditionalOperator parameter + // to OR the conditions instead. If you do this, then at least one of the conditions // must evaluate to true, rather than all of them.) // // Note that QueryFilter does not allow key attributes. You cannot define a @@ -4487,7 +4879,7 @@ type QueryInput struct { // Each QueryFilter element consists of an attribute name to compare, along // with the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the operator specified // in ComparisonOperator. 
// @@ -4504,12 +4896,12 @@ type QueryInput struct { // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) // in the Amazon DynamoDB Developer Guide. // - // ComparisonOperator - A comparator for evaluating attributes. For example, + // ComparisonOperator - A comparator for evaluating attributes. For example, // equals, greater than, less than, etc. // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // For complete descriptions of all comparison operators, see the Condition @@ -4520,7 +4912,7 @@ type QueryInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -4528,10 +4920,10 @@ type QueryInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // Specifies the order for index traversal: If true (default), the traversal @@ -4554,18 +4946,18 @@ type QueryInput struct { // specific item attributes, the count of matching items, or in the case of // an index, some or all of the attributes projected into the index. // - // ALL_ATTRIBUTES - Returns all of the item attributes from the specified + // ALL_ATTRIBUTES - Returns all of the item attributes from the specified // table or index. If you query a local secondary index, then for each matching // item in the index DynamoDB will fetch the entire item from the parent table. // If the index is configured to project all item attributes, then all of the // data can be obtained from the local secondary index, and no fetching is required. // - // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves // all attributes that have been projected into the index. If the index is configured // to project all attributes, this return value is equivalent to specifying // ALL_ATTRIBUTES. // - // COUNT - Returns the number of matching items, rather than the matching + // COUNT - Returns the number of matching items, rather than the matching // items themselves. // // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. @@ -4590,7 +4982,7 @@ type QueryInput struct { // (This usage is equivalent to specifying AttributesToGet without any value // for Select.) // - // If you use the ProjectionExpression parameter, then the value for Select + // If you use the ProjectionExpression parameter, then the value for Select // can only be SPECIFIC_ATTRIBUTES. Any other value for Select will return an // error. 
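A sketch of a Query that combines the pieces documented above: a KeyConditionExpression with the #S placeholder for the reserved word Size, a FilterExpression applied after the key condition, Limit, and ScanIndexForward; the table and its key schema are hypothetical:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    out, err := svc.Query(&dynamodb.QueryInput{
        TableName: aws.String("ProductCatalog"), // hypothetical table
        // "#S" works around the reserved word "Size"; Id is the partition key
        // and Size the sort key in this made-up schema.
        KeyConditionExpression:   aws.String("Id = :pk AND #S >= :minsize"),
        ExpressionAttributeNames: map[string]*string{"#S": aws.String("Size")},
        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
            ":pk":      {N: aws.String("456")},
            ":minsize": {N: aws.String("10")},
            ":status":  {S: aws.String("Available")},
        },
        // Applied after the items are read; does not consume extra capacity.
        FilterExpression: aws.String("ProductStatus = :status"),
        Limit:            aws.Int64(25),
        ScanIndexForward: aws.Bool(false), // descending order on the sort key
    })
    if err != nil {
        log.Fatal(err)
    }

    fmt.Println(len(out.Items), aws.Int64Value(out.Count))
}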
Select *string `type:"string" enum:"Select"` @@ -4724,9 +5116,9 @@ type ScanInput struct { // This parameter allows you to retrieve attributes of type List or Map; however, // it cannot retrieve individual elements within a List or a Map. // - // The names of one or more attributes to retrieve. If no attribute names are - // provided, then all attributes will be returned. If any of the requested attributes - // are not found, they will not appear in the result. + // The names of one or more attributes to retrieve. If no attribute names + // are provided, then all attributes will be returned. If any of the requested + // attributes are not found, they will not appear in the result. // // Note that AttributesToGet has no effect on provisioned throughput consumption. // DynamoDB determines capacity units consumed based on item size, not on the @@ -4740,17 +5132,17 @@ type ScanInput struct { // // A logical operator to apply to the conditions in a ScanFilter map: // - // AND - If all of the conditions evaluate to true, then the entire map evaluates - // to true. + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. // - // OR - If at least one of the conditions evaluate to true, then the entire + // OR - If at least one of the conditions evaluate to true, then the entire // map evaluates to true. // - // If you omit ConditionalOperator, then AND is the default. + // If you omit ConditionalOperator, then AND is the default. // // The operation will succeed only if the entire map evaluates to true. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` // A Boolean value that determines the read consistency model during the scan: @@ -4794,25 +5186,25 @@ type ScanInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. 
ExpressionAttributeNames map[string]*string `type:"map"` @@ -4822,16 +5214,16 @@ type ScanInput struct { // value. For example, suppose that you wanted to check whether the value of // the ProductStatus attribute was one of the following: // - // Available | Backordered | Discontinued + // Available | Backordered | Discontinued // // You would first need to specify ExpressionAttributeValues as follows: // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} // } // // You could then use these values in an expression, such as this: // - // ProductStatus IN (:avail, :back, :disc) + // ProductStatus IN (:avail, :back, :disc) // // For more information on expression attribute values, see Specifying Conditions // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) @@ -4845,10 +5237,10 @@ type ScanInput struct { // A FilterExpression is applied after the items have already been read; the // process of filtering does not consume any additional read capacity units. // - // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) + // For more information, see Filter Expressions (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#FilteringResults) // in the Amazon DynamoDB Developer Guide. // - // FilterExpression replaces the legacy ScanFilter and ConditionalOperator + // FilterExpression replaces the legacy ScanFilter and ConditionalOperator // parameters. FilterExpression *string `type:"string"` @@ -4865,7 +5257,8 @@ type ScanInput struct { // size exceeds 1 MB before DynamoDB reaches this limit, it stops the operation // and returns the matching values up to the limit, and a key in LastEvaluatedKey // to apply in a subsequent operation to continue the operation. For more information, - // see Query and Scan in the Amazon DynamoDB Developer Guide. + // see Query and Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html) + // in the Amazon DynamoDB Developer Guide. Limit *int64 `min:"1" type:"integer"` // A string that identifies one or more attributes to retrieve from the specified @@ -4879,13 +5272,13 @@ type ScanInput struct { // For more information, see Accessing Item Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. // - // ProjectionExpression replaces the legacy AttributesToGet parameter. + // ProjectionExpression replaces the legacy AttributesToGet parameter. ProjectionExpression *string `type:"string"` // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -4893,10 +5286,10 @@ type ScanInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. + // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. 
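A sketch of a Scan using the FilterExpression and Limit fields documented above, following LastEvaluatedKey to page through results; the table and attribute names are hypothetical:

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
    svc := dynamodb.New(session.New())

    input := &dynamodb.ScanInput{
        TableName:        aws.String("ProductCatalog"), // hypothetical table
        FilterExpression: aws.String("Price < :p"),
        ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
            ":p": {N: aws.String("100")},
        },
        Limit: aws.Int64(25), // items evaluated per page, before the filter
    }

    total := 0
    for {
        out, err := svc.Scan(input)
        if err != nil {
            log.Fatal(err)
        }
        total += len(out.Items)

        // An empty LastEvaluatedKey means the scan is complete.
        if len(out.LastEvaluatedKey) == 0 {
            break
        }
        input.ExclusiveStartKey = out.LastEvaluatedKey
    }
    fmt.Println("matched items:", total)
}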
// - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // This is a legacy parameter, for backward compatibility. New applications @@ -4907,9 +5300,9 @@ type ScanInput struct { // A condition that evaluates the scan results and returns only the desired // values. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. // - // If you specify more than one condition in the ScanFilter map, then by default + // If you specify more than one condition in the ScanFilter map, then by default // all of the conditions must evaluate to true. In other words, the conditions // are ANDed together. (You can use the ConditionalOperator parameter to OR // the conditions instead. If you do this, then at least one of the conditions @@ -4918,7 +5311,7 @@ type ScanInput struct { // Each ScanFilter element consists of an attribute name to compare, along // with the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the operator specified // in ComparisonOperator . // @@ -4935,12 +5328,12 @@ type ScanInput struct { // For information on specifying data types in JSON, see JSON Data Format (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataFormat.html) // in the Amazon DynamoDB Developer Guide. // - // ComparisonOperator - A comparator for evaluating attributes. For example, + // ComparisonOperator - A comparator for evaluating attributes. For example, // equals, greater than, less than, etc. // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // For complete descriptions of all comparison operators, see Condition (http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Condition.html). @@ -4967,9 +5360,14 @@ type ScanInput struct { // The attributes to be returned in the result. You can retrieve all item attributes, // specific item attributes, or the count of matching items. // - // ALL_ATTRIBUTES - Returns all of the item attributes. + // ALL_ATTRIBUTES - Returns all of the item attributes. // - // COUNT - Returns the number of matching items, rather than the matching + // ALL_PROJECTED_ATTRIBUTES - Allowed only when querying an index. Retrieves + // all attributes that have been projected into the index. If the index is configured + // to project all attributes, this return value is equivalent to specifying + // ALL_ATTRIBUTES. + // + // COUNT - Returns the number of matching items, rather than the matching // items themselves. // // SPECIFIC_ATTRIBUTES - Returns only the attributes listed in AttributesToGet. @@ -5116,24 +5514,24 @@ type StreamSpecification struct { // The DynamoDB Streams settings for the table. These settings consist of: // - // StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) or - // disabled (false) on the table. + // StreamEnabled - Indicates whether DynamoDB Streams is enabled (true) + // or disabled (false) on the table. 
// - // StreamViewType - When an item in the table is modified, StreamViewType + // StreamViewType - When an item in the table is modified, StreamViewType // determines what information is written to the stream for this table. Valid // values for StreamViewType are: // - // KEYS_ONLY - Only the key attributes of the modified item are written to - // the stream. - // - // NEW_IMAGE - The entire item, as it appears after it was modified, is written + // KEYS_ONLY - Only the key attributes of the modified item are written // to the stream. // - // OLD_IMAGE - The entire item, as it appeared before it was modified, is written - // to the stream. - // - // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item are + // NEW_IMAGE - The entire item, as it appears after it was modified, is // written to the stream. + // + // OLD_IMAGE - The entire item, as it appeared before it was modified, is + // written to the stream. + // + // NEW_AND_OLD_IMAGES - Both the new and the old item images of the item + // are written to the stream. StreamViewType *string `type:"string" enum:"StreamViewType"` } @@ -5156,9 +5554,9 @@ type TableDescription struct { // // Each AttributeDefinition object in this array is composed of: // - // AttributeName - The name of the attribute. + // AttributeName - The name of the attribute. // - // AttributeType - The data type for the attribute. + // AttributeType - The data type for the attribute. AttributeDefinitions []*AttributeDefinition `type:"list"` // The date and time when the table was created, in UNIX epoch time (http://www.epochconverter.com/) @@ -5168,57 +5566,57 @@ type TableDescription struct { // The global secondary indexes, if any, on the table. Each index is scoped // to a given partition key value. Each element is composed of: // - // Backfilling - If true, then the index is currently in the backfilling + // Backfilling - If true, then the index is currently in the backfilling // phase. Backfilling occurs only when a new global secondary index is added // to the table; it is the process by which DynamoDB populates the new index // with data from the table. (This attribute does not appear for indexes that // were created during a CreateTable operation.) // - // IndexName - The name of the global secondary index. + // IndexName - The name of the global secondary index. // - // IndexSizeBytes - The total size of the global secondary index, in bytes. + // IndexSizeBytes - The total size of the global secondary index, in bytes. // DynamoDB updates this value approximately every six hours. Recent changes // might not be reflected in this value. // - // IndexStatus - The current status of the global secondary index: + // IndexStatus - The current status of the global secondary index: // - // CREATING - The index is being created. + // CREATING - The index is being created. // - // UPDATING - The index is being updated. + // UPDATING - The index is being updated. // - // DELETING - The index is being deleted. + // DELETING - The index is being deleted. // - // ACTIVE - The index is ready for use. + // ACTIVE - The index is ready for use. // - // ItemCount - The number of items in the global secondary index. DynamoDB + // ItemCount - The number of items in the global secondary index. DynamoDB // updates this value approximately every six hours. Recent changes might not // be reflected in this value. // - // KeySchema - Specifies the complete index key schema. The attribute names + // KeySchema - Specifies the complete index key schema. 
The attribute names // in the key schema must be between 1 and 255 characters (inclusive). The key // schema must begin with the same partition key as the table. // - // Projection - Specifies attributes that are copied (projected) from the + // Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute // specification is composed of: // - // ProjectionType - One of the following: + // ProjectionType - One of the following: // - // KEYS_ONLY - Only the index and primary keys are projected into the index. + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. // - // ALL - All of the table attributes are projected into the index. + // ALL - All of the table attributes are projected into the index. // - // NonKeyAttributes - A list of one or more non-key attribute names that + // NonKeyAttributes - A list of one or more non-key attribute names that // are projected into the secondary index. The total count of attributes provided // in NonKeyAttributes, summed across all of the secondary indexes, must not // exceed 20. If you project the same attribute into two different indexes, // this counts as two distinct attributes when determining the total. // - // ProvisionedThroughput - The provisioned throughput settings for the + // ProvisionedThroughput - The provisioned throughput settings for the // global secondary index, consisting of read and write capacity units, along // with data about increases and decreases. // @@ -5232,15 +5630,15 @@ type TableDescription struct { // The primary key structure for the table. Each KeySchemaElement consists of: // - // AttributeName - The name of the attribute. + // AttributeName - The name of the attribute. // - // KeyType - The role of the attribute: + // KeyType - The role of the attribute: // - // . HASH - partition key + // HASH - partition key // - // RANGE - sort key + // RANGE - sort key // - // The partition key of an item is also known as its hash attribute. The + // The partition key of an item is also known as its hash attribute. The // term "hash attribute" derives from DynamoDB' usage of an internal hash function // to evenly distribute data items across partitions, based on their partition // key values. @@ -5249,7 +5647,7 @@ type TableDescription struct { // attribute" derives from the way DynamoDB stores items with the same partition // key physically close together, in sorted order by the sort key value. // - // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) + // For more information about primary keys, see Primary Key (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DataModel.html#DataModelPrimaryKey) // in the Amazon DynamoDB Developer Guide. KeySchema []*KeySchemaElement `min:"1" type:"list"` @@ -5264,11 +5662,11 @@ type TableDescription struct { // However, the combination of the following three elements is guaranteed to // be unique: // - // the AWS customer ID. + // the AWS customer ID. // - // the table name. + // the table name. 
// - // the StreamLabel. + // the StreamLabel. LatestStreamLabel *string `type:"string"` // Represents one or more local secondary indexes on the table. Each index is @@ -5277,37 +5675,37 @@ type TableDescription struct { // data within a given item collection cannot exceed 10 GB. Each element is // composed of: // - // IndexName - The name of the local secondary index. + // IndexName - The name of the local secondary index. // - // KeySchema - Specifies the complete index key schema. The attribute names + // KeySchema - Specifies the complete index key schema. The attribute names // in the key schema must be between 1 and 255 characters (inclusive). The key // schema must begin with the same partition key as the table. // - // Projection - Specifies attributes that are copied (projected) from the + // Projection - Specifies attributes that are copied (projected) from the // table into the index. These are in addition to the primary key attributes // and index key attributes, which are automatically projected. Each attribute // specification is composed of: // - // ProjectionType - One of the following: + // ProjectionType - One of the following: // - // KEYS_ONLY - Only the index and primary keys are projected into the index. + // KEYS_ONLY - Only the index and primary keys are projected into the index. // - // INCLUDE - Only the specified table attributes are projected into the index. - // The list of projected attributes are in NonKeyAttributes. + // INCLUDE - Only the specified table attributes are projected into the + // index. The list of projected attributes are in NonKeyAttributes. // - // ALL - All of the table attributes are projected into the index. + // ALL - All of the table attributes are projected into the index. // - // NonKeyAttributes - A list of one or more non-key attribute names that + // NonKeyAttributes - A list of one or more non-key attribute names that // are projected into the secondary index. The total count of attributes provided // in NonKeyAttributes, summed across all of the secondary indexes, must not // exceed 20. If you project the same attribute into two different indexes, // this counts as two distinct attributes when determining the total. // - // IndexSizeBytes - Represents the total size of the index, in bytes. DynamoDB - // updates this value approximately every six hours. Recent changes might not - // be reflected in this value. + // IndexSizeBytes - Represents the total size of the index, in bytes. + // DynamoDB updates this value approximately every six hours. Recent changes + // might not be reflected in this value. // - // ItemCount - Represents the number of items in the index. DynamoDB updates + // ItemCount - Represents the number of items in the index. DynamoDB updates // this value approximately every six hours. Recent changes might not be reflected // in this value. // @@ -5335,13 +5733,13 @@ type TableDescription struct { // The current state of the table: // - // CREATING - The table is being created. + // CREATING - The table is being created. // - // UPDATING - The table is being updated. + // UPDATING - The table is being updated. // - // DELETING - The table is being deleted. + // DELETING - The table is being deleted. // - // ACTIVE - The table is ready for use. + // ACTIVE - The table is ready for use. 
TableStatus *string `type:"string" enum:"TableStatus"` } @@ -5431,19 +5829,19 @@ type UpdateItemInput struct { // Each AttributeUpdates element consists of an attribute name to modify, along // with the following: // - // Value - The new value, if applicable, for this attribute. + // Value - The new value, if applicable, for this attribute. // - // Action - A value that specifies how to perform the update. This action + // Action - A value that specifies how to perform the update. This action // is only valid for an existing attribute whose data type is Number or is a // set; do not use ADD for other data types. // // If an item with the specified primary key is found in the table, the following // values perform the following actions: // - // PUT - Adds the specified attribute to the item. If the attribute already + // PUT - Adds the specified attribute to the item. If the attribute already // exists, it is replaced by the new value. // - // DELETE - Removes the attribute and its value, if no value is specified + // DELETE - Removes the attribute and its value, if no value is specified // for DELETE. The data type of the specified value must match the existing // value's data type. // @@ -5452,7 +5850,7 @@ type UpdateItemInput struct { // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying // an empty set is an error. // - // ADD - Adds the specified value to the item, if the attribute does not + // ADD - Adds the specified value to the item, if the attribute does not // already exist. If the attribute does exist, then the behavior of ADD depends // on the data type of the attribute: // @@ -5484,14 +5882,14 @@ type UpdateItemInput struct { // If no item with the specified key is found in the table, the following // values perform the following actions: // - // PUT - Causes DynamoDB to create a new item with the specified primary + // PUT - Causes DynamoDB to create a new item with the specified primary // key, and then adds the attribute. // - // DELETE - Nothing happens, because attributes cannot be deleted from a + // DELETE - Nothing happens, because attributes cannot be deleted from a // nonexistent item. The operation succeeds, but DynamoDB does not create a // new item. // - // ADD - Causes DynamoDB to create an item with the supplied primary key + // ADD - Causes DynamoDB to create an item with the supplied primary key // and number (or set of numbers) for the attribute value. The only data types // allowed are Number and Number Set. // @@ -5509,7 +5907,8 @@ type UpdateItemInput struct { // // These function names are case-sensitive. // - // Comparison operators: = | | | | = | = | BETWEEN | IN + // Comparison operators: = | <> | < | > | <= | + // >= | BETWEEN | IN // // Logical operators: AND | OR | NOT // @@ -5517,7 +5916,7 @@ type UpdateItemInput struct { // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) // in the Amazon DynamoDB Developer Guide. // - // ConditionExpression replaces the legacy ConditionalOperator and Expected + // ConditionExpression replaces the legacy ConditionalOperator and Expected // parameters. ConditionExpression *string `type:"string"` @@ -5528,17 +5927,17 @@ type UpdateItemInput struct { // // A logical operator to apply to the conditions in the Expected map: // - // AND - If all of the conditions evaluate to true, then the entire map evaluates - // to true. + // AND - If all of the conditions evaluate to true, then the entire map + // evaluates to true. 
// - // OR - If at least one of the conditions evaluate to true, then the entire + // OR - If at least one of the conditions evaluate to true, then the entire // map evaluates to true. // - // If you omit ConditionalOperator, then AND is the default. + // If you omit ConditionalOperator, then AND is the default. // // The operation will succeed only if the entire map evaluates to true. // - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` // This is a legacy parameter, for backward compatibility. New applications @@ -5563,9 +5962,9 @@ type UpdateItemInput struct { // If the Expected map evaluates to true, then the conditional operation succeeds; // otherwise, it fails. // - // Expected contains the following: + // Expected contains the following: // - // AttributeValueList - One or more values to evaluate against the supplied + // AttributeValueList - One or more values to evaluate against the supplied // attribute. The number of values in the list depends on the ComparisonOperator // being used. // @@ -5573,132 +5972,133 @@ type UpdateItemInput struct { // // String value comparisons for greater than, equals, or less than are based // on ASCII character code values. For example, a is greater than A, and a is - // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters. + // greater than B. For a list of code values, see http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters + // (http://en.wikipedia.org/wiki/ASCII#ASCII_printable_characters). // // For type Binary, DynamoDB treats each byte of the binary data as unsigned // when it compares binary values. // - // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. + // ComparisonOperator - A comparator for evaluating attributes in the AttributeValueList. // When performing the comparison, DynamoDB uses strongly consistent reads. // // The following comparison operators are available: // - // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS + // EQ | NE | LE | LT | GE | GT | NOT_NULL | NULL | CONTAINS | NOT_CONTAINS // | BEGINS_WITH | IN | BETWEEN // // The following are descriptions of each comparison operator. // - // EQ : Equal. EQ is supported for all datatypes, including lists and maps. + // EQ : Equal. EQ is supported for all datatypes, including lists and maps. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, Binary, String Set, Number Set, or Binary Set. If an item contains - // an AttributeValue element of a different type than the one provided in the - // request, the value does not match. For example, {"S":"6"} does not equal - // {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, Binary, String Set, Number Set, or Binary Set. If an item + // contains an AttributeValue element of a different type than the one provided + // in the request, the value does not match. For example, {"S":"6"} does not + // equal {"N":"6"}. Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // NE : Not equal. NE is supported for all datatypes, including lists and + // NE : Not equal. NE is supported for all datatypes, including lists and // maps. 
// - // AttributeValueList can contain only one AttributeValue of type String, Number, - // Binary, String Set, Number Set, or Binary Set. If an item contains an AttributeValue - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not equal {"NS":["6", "2", "1"]}. + // AttributeValueList can contain only one AttributeValue of type String, + // Number, Binary, String Set, Number Set, or Binary Set. If an item contains + // an AttributeValue of a different type than the one provided in the request, + // the value does not match. For example, {"S":"6"} does not equal {"N":"6"}. + // Also, {"N":"6"} does not equal {"NS":["6", "2", "1"]}. // - // LE : Less than or equal. + // LE : Less than or equal. // - // AttributeValueList can contain only one AttributeValue element of type String, + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue + // element of a different type than the one provided in the request, the value + // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} + // does not compare to {"NS":["6", "2", "1"]}. + // + // LT : Less than. + // + // AttributeValueList can contain only one AttributeValue of type String, // Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // LT : Less than. + // GE : Greater than or equal. // - // AttributeValueList can contain only one AttributeValue of type String, Number, - // or Binary (not a set type). If an item contains an AttributeValue element - // of a different type than the one provided in the request, the value does - // not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} - // does not compare to {"NS":["6", "2", "1"]}. - // - // GE : Greater than or equal. - // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // GT : Greater than. + // GT : Greater than. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If an item contains an AttributeValue + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If an item contains an AttributeValue // element of a different type than the one provided in the request, the value // does not match. For example, {"S":"6"} does not equal {"N":"6"}. Also, {"N":"6"} // does not compare to {"NS":["6", "2", "1"]}. // - // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, + // NOT_NULL : The attribute exists. NOT_NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the existence of an attribute, not its data type. 
+ // This operator tests for the existence of an attribute, not its data type. // If the data type of attribute "a" is null, and you evaluate it using NOT_NULL, // the result is a Boolean true. This result is because the attribute "a" exists; // its data type is not relevant to the NOT_NULL comparison operator. // - // NULL : The attribute does not exist. NULL is supported for all datatypes, + // NULL : The attribute does not exist. NULL is supported for all datatypes, // including lists and maps. // - // This operator tests for the nonexistence of an attribute, not its data type. - // If the data type of attribute "a" is null, and you evaluate it using NULL, - // the result is a Boolean false. This is because the attribute "a" exists; + // This operator tests for the nonexistence of an attribute, not its data + // type. If the data type of attribute "a" is null, and you evaluate it using + // NULL, the result is a Boolean false. This is because the attribute "a" exists; // its data type is not relevant to the NULL comparison operator. // - // CONTAINS : Checks for a subsequence, or value in a set. + // CONTAINS : Checks for a subsequence, or value in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is of type String, then the operator checks for a substring match. If the - // target attribute of the comparison is of type Binary, then the operator looks - // for a subsequence of the target that matches the input. If the target attribute - // of the comparison is a set ("SS", "NS", or "BS"), then the operator evaluates - // to true if it finds an exact match with any member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is of type String, then the operator checks for a substring match. + // If the target attribute of the comparison is of type Binary, then the operator + // looks for a subsequence of the target that matches the input. If the target + // attribute of the comparison is a set ("SS", "NS", or "BS"), then the operator + // evaluates to true if it finds an exact match with any member of the set. // // CONTAINS is supported for lists: When evaluating "a CONTAINS b", "a" can // be a list; however, "b" cannot be a set, a map, or a list. // - // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value + // NOT_CONTAINS : Checks for absence of a subsequence, or absence of a value // in a set. // - // AttributeValueList can contain only one AttributeValue element of type String, - // Number, or Binary (not a set type). If the target attribute of the comparison - // is a String, then the operator checks for the absence of a substring match. - // If the target attribute of the comparison is Binary, then the operator checks - // for the absence of a subsequence of the target that matches the input. If - // the target attribute of the comparison is a set ("SS", "NS", or "BS"), then - // the operator evaluates to true if it does not find an exact match with any - // member of the set. + // AttributeValueList can contain only one AttributeValue element of type + // String, Number, or Binary (not a set type). If the target attribute of the + // comparison is a String, then the operator checks for the absence of a substring + // match. 
If the target attribute of the comparison is Binary, then the operator + // checks for the absence of a subsequence of the target that matches the input. + // If the target attribute of the comparison is a set ("SS", "NS", or "BS"), + // then the operator evaluates to true if it does not find an exact match with + // any member of the set. // // NOT_CONTAINS is supported for lists: When evaluating "a NOT CONTAINS b", // "a" can be a list; however, "b" cannot be a set, a map, or a list. // - // BEGINS_WITH : Checks for a prefix. + // BEGINS_WITH : Checks for a prefix. // - // AttributeValueList can contain only one AttributeValue of type String or + // AttributeValueList can contain only one AttributeValue of type String or // Binary (not a Number or a set type). The target attribute of the comparison // must be of type String or Binary (not a Number or a set type). // - // IN : Checks for matching elements within two sets. + // IN : Checks for matching elements within two sets. // - // AttributeValueList can contain one or more AttributeValue elements of type + // AttributeValueList can contain one or more AttributeValue elements of type // String, Number, or Binary (not a set type). These attributes are compared // against an existing set type attribute of an item. If any elements of the // input set are present in the item attribute, the expression evaluates to // true. // - // BETWEEN : Greater than or equal to the first value, and less than or equal - // to the second value. + // BETWEEN : Greater than or equal to the first value, and less than or + // equal to the second value. // - // AttributeValueList must contain two AttributeValue elements of the same + // AttributeValueList must contain two AttributeValue elements of the same // type, either String, Number, or Binary (not a set type). A target attribute // matches if the target value is greater than, or equal to, the first element // and less than, or equal to, the second element. If an item contains an AttributeValue @@ -5713,27 +6113,27 @@ type UpdateItemInput struct { // For backward compatibility with previous DynamoDB releases, the following // parameters can be used instead of AttributeValueList and ComparisonOperator: // - // Value - A value for DynamoDB to compare with an attribute. + // Value - A value for DynamoDB to compare with an attribute. // - // Exists - A Boolean value that causes DynamoDB to evaluate the value before + // Exists - A Boolean value that causes DynamoDB to evaluate the value before // attempting the conditional operation: // // If Exists is true, DynamoDB will check to see if that attribute value // already exists in the table. If it is found, then the condition evaluates // to true; otherwise the condition evaluate to false. // - // If Exists is false, DynamoDB assumes that the attribute value does not + // If Exists is false, DynamoDB assumes that the attribute value does not // exist in the table. If in fact the value does not exist, then the assumption // is valid and the condition evaluates to true. If the value is found, despite // the assumption that it does not exist, the condition evaluates to false. // - // Note that the default value for Exists is true. + // Note that the default value for Exists is true. // // The Value and Exists parameters are incompatible with AttributeValueList // and ComparisonOperator. Note that if you use both sets of parameters at once, // DynamoDB will return a ValidationException exception. 
// - // This parameter does not support attributes of type List or Map. + // This parameter does not support attributes of type List or Map. Expected map[string]*ExpectedAttributeValue `type:"map"` // One or more substitution tokens for attribute names in an expression. The @@ -5750,25 +6150,25 @@ type UpdateItemInput struct { // Use the # character in an expression to dereference an attribute name. // For example, consider the following attribute name: // - // Percentile + // Percentile // - // The name of this attribute conflicts with a reserved word, so it cannot + // The name of this attribute conflicts with a reserved word, so it cannot // be used directly in an expression. (For the complete list of reserved words, // see Reserved Words (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) // in the Amazon DynamoDB Developer Guide). To work around this, you could specify // the following for ExpressionAttributeNames: // - // {"#P":"Percentile"} + // {"#P":"Percentile"} // - // You could then use this substitution in an expression, as in this example: + // You could then use this substitution in an expression, as in this example: // - // #P = :val + // #P = :val // - // Tokens that begin with the : character are expression attribute values, + // Tokens that begin with the : character are expression attribute values, // which are placeholders for the actual value at runtime. // - // For more information on expression attribute names, see Accessing Item Attributes - // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // For more information on expression attribute names, see Accessing Item + // Attributes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` @@ -5778,16 +6178,16 @@ type UpdateItemInput struct { // value. For example, suppose that you wanted to check whether the value of // the ProductStatus attribute was one of the following: // - // Available | Backordered | Discontinued + // Available | Backordered | Discontinued // // You would first need to specify ExpressionAttributeValues as follows: // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} // } // // You could then use these values in an expression, such as this: // - // ProductStatus IN (:avail, :back, :disc) + // ProductStatus IN (:avail, :back, :disc) // // For more information on expression attribute values, see Specifying Conditions // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) @@ -5806,7 +6206,7 @@ type UpdateItemInput struct { // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // - // INDEXES - The response includes the aggregate ConsumedCapacity for the + // INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -5814,10 +6214,10 @@ type UpdateItemInput struct { // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // - // TOTAL - The response includes only the aggregate ConsumedCapacity for the - // operation. 
+ // TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. // - // NONE - No ConsumedCapacity details are included in the response. + // NONE - No ConsumedCapacity details are included in the response. ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` // Determines whether item collection metrics are returned. If set to SIZE, @@ -5830,17 +6230,17 @@ type UpdateItemInput struct { // either before or after they were updated. For UpdateItem, the valid values // are: // - // NONE - If ReturnValues is not specified, or if its value is NONE, then + // NONE - If ReturnValues is not specified, or if its value is NONE, then // nothing is returned. (This setting is the default for ReturnValues.) // - // ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then the - // content of the old item is returned. + // ALL_OLD - If UpdateItem overwrote an attribute name-value pair, then + // the content of the old item is returned. // - // UPDATED_OLD - The old versions of only the updated attributes are returned. + // UPDATED_OLD - The old versions of only the updated attributes are returned. // - // ALL_NEW - All of the attributes of the new version of the item are returned. + // ALL_NEW - All of the attributes of the new version of the item are returned. // - // UPDATED_NEW - The new versions of only the updated attributes are returned. + // UPDATED_NEW - The new versions of only the updated attributes are returned. // // There is no additional cost associated with requesting a return value // aside from the small network and processing overhead of receiving a larger @@ -5857,27 +6257,27 @@ type UpdateItemInput struct { // // The following action values are available for UpdateExpression. // - // SET - Adds one or more attributes and values to an item. If any of these + // SET - Adds one or more attributes and values to an item. If any of these // attribute already exist, they are replaced by the new values. You can also // use SET to add or subtract from an attribute that is of type Number. For // example: SET myNum = myNum + :val // - // SET supports the following functions: + // SET supports the following functions: // - // if_not_exists (path, operand) - if the item does not contain an attribute + // if_not_exists (path, operand) - if the item does not contain an attribute // at the specified path, then if_not_exists evaluates to operand; otherwise, // it evaluates to path. You can use this function to avoid overwriting an attribute // that may already be present in the item. // - // list_append (operand, operand) - evaluates to a list with a new element + // list_append (operand, operand) - evaluates to a list with a new element // added to it. You can append the new element to the start or the end of the // list by reversing the order of the operands. // - // These function names are case-sensitive. + // These function names are case-sensitive. // - // REMOVE - Removes one or more attributes from an item. + // REMOVE - Removes one or more attributes from an item. // - // ADD - Adds the specified value to the item, if the attribute does not + // ADD - Adds the specified value to the item, if the attribute does not // already exist. If the attribute does exist, then the behavior of ADD depends // on the data type of the attribute: // @@ -5905,17 +6305,17 @@ type UpdateItemInput struct { // Both sets must have the same primitive data type. 
For example, if the existing // data type is a set of strings, the Value must also be a set of strings. // - // The ADD action only supports Number and set data types. In addition, ADD - // can only be used on top-level attributes, not nested attributes. + // The ADD action only supports Number and set data types. In addition, + // ADD can only be used on top-level attributes, not nested attributes. // - // DELETE - Deletes an element from a set. + // DELETE - Deletes an element from a set. // // If a set of values is specified, then those values are subtracted from the // old set. For example, if the attribute value was the set [a,b,c] and the // DELETE action specifies [a,c], then the final attribute value is [b]. Specifying // an empty set is an error. // - // The DELETE action only supports set data types. In addition, DELETE can + // The DELETE action only supports set data types. In addition, DELETE can // only be used on top-level attributes, not nested attributes. // // You can have many actions in a single expression, such as the following: @@ -5925,7 +6325,7 @@ type UpdateItemInput struct { // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.Modifying.html) // in the Amazon DynamoDB Developer Guide. // - // UpdateExpression replaces the legacy AttributeUpdates parameter. + // UpdateExpression replaces the legacy AttributeUpdates parameter. UpdateExpression *string `type:"string"` } @@ -6004,14 +6404,14 @@ type UpdateTableInput struct { // An array of one or more global secondary indexes for the table. For each // index in the array, you can request one action: // - // Create - add a new global secondary index to the table. + // Create - add a new global secondary index to the table. // - // Update - modify the provisioned throughput settings of an existing global + // Update - modify the provisioned throughput settings of an existing global // secondary index. // - // Delete - remove a global secondary index from the table. + // Delete - remove a global secondary index from the table. // - // For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) + // For more information, see Managing Global Secondary Indexes (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.OnlineOps.html) // in the Amazon DynamoDB Developer Guide. GlobalSecondaryIndexUpdates []*GlobalSecondaryIndexUpdate `type:"list"` @@ -6202,7 +6602,7 @@ const ( // Determines the level of detail about provisioned throughput consumption that // is returned in the response: // -// INDEXES - The response includes the aggregate ConsumedCapacity for the +// INDEXES - The response includes the aggregate ConsumedCapacity for the // operation, together with ConsumedCapacity for each table and secondary index // that was accessed. // @@ -6210,10 +6610,10 @@ const ( // any indexes at all. In these cases, specifying INDEXES will only return ConsumedCapacity // information for table(s). // -// TOTAL - The response includes only the aggregate ConsumedCapacity for the -// operation. +// TOTAL - The response includes only the aggregate ConsumedCapacity for +// the operation. // -// NONE - No ConsumedCapacity details are included in the response. +// NONE - No ConsumedCapacity details are included in the response. 
const ( // @enum ReturnConsumedCapacity ReturnConsumedCapacityIndexes = "INDEXES" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go index c7accb2cb..2af4076f8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // This is the Amazon DynamoDB API Reference. This guide provides descriptions @@ -48,23 +48,23 @@ import ( // // Managing Tables // -// CreateTable - Creates a table with user-specified provisioned throughput +// CreateTable - Creates a table with user-specified provisioned throughput // settings. You must define a primary key for the table - either a simple primary // key (partition key), or a composite primary key (partition key and sort key). // Optionally, you can create one or more secondary indexes, which provide fast // data access using non-key attributes. // -// DescribeTable - Returns metadata for a table, such as table size, status, +// DescribeTable - Returns metadata for a table, such as table size, status, // and index information. // -// UpdateTable - Modifies the provisioned throughput settings for a table. +// UpdateTable - Modifies the provisioned throughput settings for a table. // Optionally, you can modify the provisioned throughput settings for global // secondary indexes on the table. // -// ListTables - Returns a list of all tables associated with the current +// ListTables - Returns a list of all tables associated with the current // AWS account and endpoint. // -// DeleteTable - Deletes a table and all of its indexes. +// DeleteTable - Deletes a table and all of its indexes. // // For conceptual information about managing tables, see Working with Tables // (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html) @@ -72,22 +72,22 @@ import ( // // Reading Data // -// GetItem - Returns a set of attributes for the item that has a given primary +// GetItem - Returns a set of attributes for the item that has a given primary // key. By default, GetItem performs an eventually consistent read; however, // applications can request a strongly consistent read instead. // -// BatchGetItem - Performs multiple GetItem requests for data items using +// BatchGetItem - Performs multiple GetItem requests for data items using // their primary keys, from one table or multiple tables. The response from // BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items. // Both eventually consistent and strongly consistent reads can be used. // -// Query - Returns one or more items from a table or a secondary index. You -// must provide a specific value for the partition key. You can narrow the scope -// of the query using comparison operators against a sort key value, or on the -// index key. Query supports either eventual or strong consistency. A single -// response has a size limit of 1 MB. +// Query - Returns one or more items from a table or a secondary index. +// You must provide a specific value for the partition key. You can narrow the +// scope of the query using comparison operators against a sort key value, or +// on the index key. 
Query supports either eventual or strong consistency. A +// single response has a size limit of 1 MB. // -// Scan - Reads every item in a table; the result set is eventually consistent. +// Scan - Reads every item in a table; the result set is eventually consistent. // You can limit the number of items returned by filtering the data attributes, // using conditional expressions. Scan can be used to enable ad-hoc querying // of a table against non-key attributes; however, since this is a full table @@ -101,22 +101,22 @@ import ( // // Modifying Data // -// PutItem - Creates a new item, or replaces an existing item with a new +// PutItem - Creates a new item, or replaces an existing item with a new // item (including all the attributes). By default, if an item in the table // already exists with the same primary key, the new item completely replaces // the existing item. You can use conditional operators to replace an item only // if its attribute values match certain conditions, or to insert a new item // only if that item doesn't already exist. // -// UpdateItem - Modifies the attributes of an existing item. You can also +// UpdateItem - Modifies the attributes of an existing item. You can also // use conditional operators to perform an update only if the item's attribute // values match certain conditions. // -// DeleteItem - Deletes an item in a table by primary key. You can use conditional +// DeleteItem - Deletes an item in a table by primary key. You can use conditional // operators to perform a delete an item only if the item's attribute values // match certain conditions. // -// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across +// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across // multiple tables in a single request. A failure of any request(s) in the batch // will not cause the entire BatchWriteItem operation to fail. Supports batches // of up to 25 items to put or delete, with a maximum total request size of @@ -174,7 +174,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 2b968591b..170405fa1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -15,7 +15,28 @@ import ( const opAcceptVpcPeeringConnection = "AcceptVpcPeeringConnection" -// AcceptVpcPeeringConnectionRequest generates a request for the AcceptVpcPeeringConnection operation. +// AcceptVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the AcceptVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AcceptVpcPeeringConnection method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AcceptVpcPeeringConnectionRequest method. +// req, resp := client.AcceptVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AcceptVpcPeeringConnectionRequest(input *AcceptVpcPeeringConnectionInput) (req *request.Request, output *AcceptVpcPeeringConnectionOutput) { op := &request.Operation{ Name: opAcceptVpcPeeringConnection, @@ -45,7 +66,28 @@ func (c *EC2) AcceptVpcPeeringConnection(input *AcceptVpcPeeringConnectionInput) const opAllocateAddress = "AllocateAddress" -// AllocateAddressRequest generates a request for the AllocateAddress operation. +// AllocateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AllocateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocateAddressRequest method. +// req, resp := client.AllocateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AllocateAddressRequest(input *AllocateAddressInput) (req *request.Request, output *AllocateAddressOutput) { op := &request.Operation{ Name: opAllocateAddress, @@ -76,7 +118,28 @@ func (c *EC2) AllocateAddress(input *AllocateAddressInput) (*AllocateAddressOutp const opAllocateHosts = "AllocateHosts" -// AllocateHostsRequest generates a request for the AllocateHosts operation. +// AllocateHostsRequest generates a "aws/request.Request" representing the +// client's request for the AllocateHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AllocateHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AllocateHostsRequest method. +// req, resp := client.AllocateHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AllocateHostsRequest(input *AllocateHostsInput) (req *request.Request, output *AllocateHostsOutput) { op := &request.Operation{ Name: opAllocateHosts, @@ -105,7 +168,28 @@ func (c *EC2) AllocateHosts(input *AllocateHostsInput) (*AllocateHostsOutput, er const opAssignPrivateIpAddresses = "AssignPrivateIpAddresses" -// AssignPrivateIpAddressesRequest generates a request for the AssignPrivateIpAddresses operation. 
+// AssignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the AssignPrivateIpAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignPrivateIpAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignPrivateIpAddressesRequest method. +// req, resp := client.AssignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AssignPrivateIpAddressesRequest(input *AssignPrivateIpAddressesInput) (req *request.Request, output *AssignPrivateIpAddressesOutput) { op := &request.Operation{ Name: opAssignPrivateIpAddresses, @@ -144,7 +228,28 @@ func (c *EC2) AssignPrivateIpAddresses(input *AssignPrivateIpAddressesInput) (*A const opAssociateAddress = "AssociateAddress" -// AssociateAddressRequest generates a request for the AssociateAddress operation. +// AssociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the AssociateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateAddressRequest method. +// req, resp := client.AssociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AssociateAddressRequest(input *AssociateAddressInput) (req *request.Request, output *AssociateAddressOutput) { op := &request.Operation{ Name: opAssociateAddress, @@ -187,7 +292,28 @@ func (c *EC2) AssociateAddress(input *AssociateAddressInput) (*AssociateAddressO const opAssociateDhcpOptions = "AssociateDhcpOptions" -// AssociateDhcpOptionsRequest generates a request for the AssociateDhcpOptions operation. +// AssociateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the AssociateDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateDhcpOptions method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateDhcpOptionsRequest method. +// req, resp := client.AssociateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AssociateDhcpOptionsRequest(input *AssociateDhcpOptionsInput) (req *request.Request, output *AssociateDhcpOptionsOutput) { op := &request.Operation{ Name: opAssociateDhcpOptions, @@ -227,7 +353,28 @@ func (c *EC2) AssociateDhcpOptions(input *AssociateDhcpOptionsInput) (*Associate const opAssociateRouteTable = "AssociateRouteTable" -// AssociateRouteTableRequest generates a request for the AssociateRouteTable operation. +// AssociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the AssociateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateRouteTableRequest method. +// req, resp := client.AssociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AssociateRouteTableRequest(input *AssociateRouteTableInput) (req *request.Request, output *AssociateRouteTableOutput) { op := &request.Operation{ Name: opAssociateRouteTable, @@ -261,7 +408,28 @@ func (c *EC2) AssociateRouteTable(input *AssociateRouteTableInput) (*AssociateRo const opAttachClassicLinkVpc = "AttachClassicLinkVpc" -// AttachClassicLinkVpcRequest generates a request for the AttachClassicLinkVpc operation. +// AttachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the AttachClassicLinkVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachClassicLinkVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachClassicLinkVpcRequest method. 
+// req, resp := client.AttachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AttachClassicLinkVpcRequest(input *AttachClassicLinkVpcInput) (req *request.Request, output *AttachClassicLinkVpcOutput) { op := &request.Operation{ Name: opAttachClassicLinkVpc, @@ -299,7 +467,28 @@ func (c *EC2) AttachClassicLinkVpc(input *AttachClassicLinkVpcInput) (*AttachCla const opAttachInternetGateway = "AttachInternetGateway" -// AttachInternetGatewayRequest generates a request for the AttachInternetGateway operation. +// AttachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachInternetGatewayRequest method. +// req, resp := client.AttachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AttachInternetGatewayRequest(input *AttachInternetGatewayInput) (req *request.Request, output *AttachInternetGatewayOutput) { op := &request.Operation{ Name: opAttachInternetGateway, @@ -330,7 +519,28 @@ func (c *EC2) AttachInternetGateway(input *AttachInternetGatewayInput) (*AttachI const opAttachNetworkInterface = "AttachNetworkInterface" -// AttachNetworkInterfaceRequest generates a request for the AttachNetworkInterface operation. +// AttachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the AttachNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachNetworkInterfaceRequest method. +// req, resp := client.AttachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AttachNetworkInterfaceRequest(input *AttachNetworkInterfaceInput) (req *request.Request, output *AttachNetworkInterfaceOutput) { op := &request.Operation{ Name: opAttachNetworkInterface, @@ -357,7 +567,28 @@ func (c *EC2) AttachNetworkInterface(input *AttachNetworkInterfaceInput) (*Attac const opAttachVolume = "AttachVolume" -// AttachVolumeRequest generates a request for the AttachVolume operation. 
+// AttachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the AttachVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachVolumeRequest method. +// req, resp := client.AttachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AttachVolumeRequest(input *AttachVolumeInput) (req *request.Request, output *VolumeAttachment) { op := &request.Operation{ Name: opAttachVolume, @@ -414,7 +645,28 @@ func (c *EC2) AttachVolume(input *AttachVolumeInput) (*VolumeAttachment, error) const opAttachVpnGateway = "AttachVpnGateway" -// AttachVpnGatewayRequest generates a request for the AttachVpnGateway operation. +// AttachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the AttachVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachVpnGatewayRequest method. +// req, resp := client.AttachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AttachVpnGatewayRequest(input *AttachVpnGatewayInput) (req *request.Request, output *AttachVpnGatewayOutput) { op := &request.Operation{ Name: opAttachVpnGateway, @@ -443,7 +695,28 @@ func (c *EC2) AttachVpnGateway(input *AttachVpnGatewayInput) (*AttachVpnGatewayO const opAuthorizeSecurityGroupEgress = "AuthorizeSecurityGroupEgress" -// AuthorizeSecurityGroupEgressRequest generates a request for the AuthorizeSecurityGroupEgress operation. +// AuthorizeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupEgress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSecurityGroupEgress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the AuthorizeSecurityGroupEgressRequest method. +// req, resp := client.AuthorizeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AuthorizeSecurityGroupEgressRequest(input *AuthorizeSecurityGroupEgressInput) (req *request.Request, output *AuthorizeSecurityGroupEgressOutput) { op := &request.Operation{ Name: opAuthorizeSecurityGroupEgress, @@ -490,7 +763,28 @@ func (c *EC2) AuthorizeSecurityGroupEgress(input *AuthorizeSecurityGroupEgressIn const opAuthorizeSecurityGroupIngress = "AuthorizeSecurityGroupIngress" -// AuthorizeSecurityGroupIngressRequest generates a request for the AuthorizeSecurityGroupIngress operation. +// AuthorizeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) AuthorizeSecurityGroupIngressRequest(input *AuthorizeSecurityGroupIngressInput) (req *request.Request, output *AuthorizeSecurityGroupIngressOutput) { op := &request.Operation{ Name: opAuthorizeSecurityGroupIngress, @@ -537,7 +831,28 @@ func (c *EC2) AuthorizeSecurityGroupIngress(input *AuthorizeSecurityGroupIngress const opBundleInstance = "BundleInstance" -// BundleInstanceRequest generates a request for the BundleInstance operation. +// BundleInstanceRequest generates a "aws/request.Request" representing the +// client's request for the BundleInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BundleInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BundleInstanceRequest method. 
+// req, resp := client.BundleInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) BundleInstanceRequest(input *BundleInstanceInput) (req *request.Request, output *BundleInstanceOutput) { op := &request.Operation{ Name: opBundleInstance, @@ -573,7 +888,28 @@ func (c *EC2) BundleInstance(input *BundleInstanceInput) (*BundleInstanceOutput, const opCancelBundleTask = "CancelBundleTask" -// CancelBundleTaskRequest generates a request for the CancelBundleTask operation. +// CancelBundleTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelBundleTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelBundleTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelBundleTaskRequest method. +// req, resp := client.CancelBundleTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelBundleTaskRequest(input *CancelBundleTaskInput) (req *request.Request, output *CancelBundleTaskOutput) { op := &request.Operation{ Name: opCancelBundleTask, @@ -600,7 +936,28 @@ func (c *EC2) CancelBundleTask(input *CancelBundleTaskInput) (*CancelBundleTaskO const opCancelConversionTask = "CancelConversionTask" -// CancelConversionTaskRequest generates a request for the CancelConversionTask operation. +// CancelConversionTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelConversionTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelConversionTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelConversionTaskRequest method. +// req, resp := client.CancelConversionTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelConversionTaskRequest(input *CancelConversionTaskInput) (req *request.Request, output *CancelConversionTaskOutput) { op := &request.Operation{ Name: opCancelConversionTask, @@ -637,7 +994,28 @@ func (c *EC2) CancelConversionTask(input *CancelConversionTaskInput) (*CancelCon const opCancelExportTask = "CancelExportTask" -// CancelExportTaskRequest generates a request for the CancelExportTask operation. +// CancelExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelExportTask operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelExportTaskRequest method. +// req, resp := client.CancelExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { op := &request.Operation{ Name: opCancelExportTask, @@ -669,7 +1047,28 @@ func (c *EC2) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskO const opCancelImportTask = "CancelImportTask" -// CancelImportTaskRequest generates a request for the CancelImportTask operation. +// CancelImportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CancelImportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelImportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelImportTaskRequest method. +// req, resp := client.CancelImportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelImportTaskRequest(input *CancelImportTaskInput) (req *request.Request, output *CancelImportTaskOutput) { op := &request.Operation{ Name: opCancelImportTask, @@ -696,7 +1095,28 @@ func (c *EC2) CancelImportTask(input *CancelImportTaskInput) (*CancelImportTaskO const opCancelReservedInstancesListing = "CancelReservedInstancesListing" -// CancelReservedInstancesListingRequest generates a request for the CancelReservedInstancesListing operation. +// CancelReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CancelReservedInstancesListing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelReservedInstancesListing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelReservedInstancesListingRequest method. 
+// req, resp := client.CancelReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstancesListingInput) (req *request.Request, output *CancelReservedInstancesListingOutput) { op := &request.Operation{ Name: opCancelReservedInstancesListing, @@ -727,7 +1147,28 @@ func (c *EC2) CancelReservedInstancesListing(input *CancelReservedInstancesListi const opCancelSpotFleetRequests = "CancelSpotFleetRequests" -// CancelSpotFleetRequestsRequest generates a request for the CancelSpotFleetRequests operation. +// CancelSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotFleetRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelSpotFleetRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelSpotFleetRequestsRequest method. +// req, resp := client.CancelSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelSpotFleetRequestsRequest(input *CancelSpotFleetRequestsInput) (req *request.Request, output *CancelSpotFleetRequestsOutput) { op := &request.Operation{ Name: opCancelSpotFleetRequests, @@ -761,7 +1202,28 @@ func (c *EC2) CancelSpotFleetRequests(input *CancelSpotFleetRequestsInput) (*Can const opCancelSpotInstanceRequests = "CancelSpotInstanceRequests" -// CancelSpotInstanceRequestsRequest generates a request for the CancelSpotInstanceRequests operation. +// CancelSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the CancelSpotInstanceRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelSpotInstanceRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelSpotInstanceRequestsRequest method. 
+// req, resp := client.CancelSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CancelSpotInstanceRequestsRequest(input *CancelSpotInstanceRequestsInput) (req *request.Request, output *CancelSpotInstanceRequestsOutput) { op := &request.Operation{ Name: opCancelSpotInstanceRequests, @@ -796,7 +1258,28 @@ func (c *EC2) CancelSpotInstanceRequests(input *CancelSpotInstanceRequestsInput) const opConfirmProductInstance = "ConfirmProductInstance" -// ConfirmProductInstanceRequest generates a request for the ConfirmProductInstance operation. +// ConfirmProductInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmProductInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmProductInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmProductInstanceRequest method. +// req, resp := client.ConfirmProductInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ConfirmProductInstanceRequest(input *ConfirmProductInstanceInput) (req *request.Request, output *ConfirmProductInstanceOutput) { op := &request.Operation{ Name: opConfirmProductInstance, @@ -826,7 +1309,28 @@ func (c *EC2) ConfirmProductInstance(input *ConfirmProductInstanceInput) (*Confi const opCopyImage = "CopyImage" -// CopyImageRequest generates a request for the CopyImage operation. +// CopyImageRequest generates a "aws/request.Request" representing the +// client's request for the CopyImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyImageRequest method. +// req, resp := client.CopyImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, output *CopyImageOutput) { op := &request.Operation{ Name: opCopyImage, @@ -858,7 +1362,28 @@ func (c *EC2) CopyImage(input *CopyImageInput) (*CopyImageOutput, error) { const opCopySnapshot = "CopySnapshot" -// CopySnapshotRequest generates a request for the CopySnapshot operation. +// CopySnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopySnapshot operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopySnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopySnapshotRequest method. +// req, resp := client.CopySnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { op := &request.Operation{ Name: opCopySnapshot, @@ -888,7 +1413,10 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // default AWS Key Management Service (AWS KMS) customer master key (CMK); however, // you can specify a non-default CMK with the KmsKeyId parameter. // -// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) +// To copy an encrypted snapshot that has been shared from another account, +// you must have permissions for the CMK used to encrypt the snapshot. +// +// For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { req, out := c.CopySnapshotRequest(input) @@ -898,7 +1426,28 @@ func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error const opCreateCustomerGateway = "CreateCustomerGateway" -// CreateCustomerGatewayRequest generates a request for the CreateCustomerGateway operation. +// CreateCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateCustomerGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCustomerGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCustomerGatewayRequest method. +// req, resp := client.CreateCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateCustomerGatewayRequest(input *CreateCustomerGatewayInput) (req *request.Request, output *CreateCustomerGatewayOutput) { op := &request.Operation{ Name: opCreateCustomerGateway, @@ -949,7 +1498,28 @@ func (c *EC2) CreateCustomerGateway(input *CreateCustomerGatewayInput) (*CreateC const opCreateDhcpOptions = "CreateDhcpOptions" -// CreateDhcpOptionsRequest generates a request for the CreateDhcpOptions operation. 
+// CreateDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the CreateDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDhcpOptionsRequest method. +// req, resp := client.CreateDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *request.Request, output *CreateDhcpOptionsOutput) { op := &request.Operation{ Name: opCreateDhcpOptions, @@ -973,12 +1543,12 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // individual DHCP options you can specify. For more information about the options, // see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). // -// domain-name-servers - The IP addresses of up to four domain name servers, +// domain-name-servers - The IP addresses of up to four domain name servers, // or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. // If specifying more than one domain name server, specify the IP addresses // in a single parameter, separated by commas. // -// domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify +// domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify // "ec2.internal". If you're using AmazonProvidedDNS in another region, specify // "region.compute.internal" (for example, "ap-northeast-1.compute.internal"). // Otherwise, specify a domain name (for example, "MyCompany.com"). Important: @@ -988,16 +1558,16 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // associated with a VPC that has instances with multiple operating systems, // specify only one domain name. // -// ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) +// ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. // -// netbios-name-servers - The IP addresses of up to four NetBIOS name servers. +// netbios-name-servers - The IP addresses of up to four NetBIOS name servers. // -// netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend +// netbios-node-type - The NetBIOS node type (1, 2, 4, or 8). We recommend // that you specify 2 (broadcast and multicast are not currently supported). // For more information about these node types, see RFC 2132 (http://www.ietf.org/rfc/rfc2132.txt). // -// Your VPC automatically starts out with a set of DHCP options that includes +// Your VPC automatically starts out with a set of DHCP options that includes // only a DNS server that we provide (AmazonProvidedDNS). 
If you create a set // of options, and if your VPC has an Internet gateway, make sure to set the // domain-name-servers option either to AmazonProvidedDNS or to a domain name @@ -1012,7 +1582,28 @@ func (c *EC2) CreateDhcpOptions(input *CreateDhcpOptionsInput) (*CreateDhcpOptio const opCreateFlowLogs = "CreateFlowLogs" -// CreateFlowLogsRequest generates a request for the CreateFlowLogs operation. +// CreateFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the CreateFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateFlowLogsRequest method. +// req, resp := client.CreateFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateFlowLogsRequest(input *CreateFlowLogsInput) (req *request.Request, output *CreateFlowLogsOutput) { op := &request.Operation{ Name: opCreateFlowLogs, @@ -1048,7 +1639,28 @@ func (c *EC2) CreateFlowLogs(input *CreateFlowLogsInput) (*CreateFlowLogsOutput, const opCreateImage = "CreateImage" -// CreateImageRequest generates a request for the CreateImage operation. +// CreateImageRequest generates a "aws/request.Request" representing the +// client's request for the CreateImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateImageRequest method. +// req, resp := client.CreateImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateImageRequest(input *CreateImageInput) (req *request.Request, output *CreateImageOutput) { op := &request.Operation{ Name: opCreateImage, @@ -1084,7 +1696,28 @@ func (c *EC2) CreateImage(input *CreateImageInput) (*CreateImageOutput, error) { const opCreateInstanceExportTask = "CreateInstanceExportTask" -// CreateInstanceExportTaskRequest generates a request for the CreateInstanceExportTask operation. +// CreateInstanceExportTaskRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceExportTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
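[Editor's note, not part of the vendored diff] The DHCP option keys documented above (domain-name-servers, domain-name, ntp-servers, netbios-name-servers, netbios-node-type) map onto NewDhcpConfiguration entries in a CreateDhcpOptions call. A minimal sketch, with placeholder values, of an option set that keeps AmazonProvidedDNS and the matching us-east-1 domain name:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.CreateDhcpOptions(&ec2.CreateDhcpOptionsInput{
		DhcpConfigurations: []*ec2.NewDhcpConfiguration{
			{
				Key:    aws.String("domain-name-servers"),
				Values: []*string{aws.String("AmazonProvidedDNS")},
			},
			{
				// In us-east-1, AmazonProvidedDNS pairs with "ec2.internal";
				// in other regions use "region.compute.internal".
				Key:    aws.String("domain-name"),
				Values: []*string{aws.String("ec2.internal")},
			},
		},
	})
	if err != nil {
		fmt.Println("CreateDhcpOptions failed:", err)
		return
	}
	fmt.Println("dhcp options set:", aws.StringValue(out.DhcpOptions.DhcpOptionsId))
}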
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstanceExportTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceExportTaskRequest method. +// req, resp := client.CreateInstanceExportTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateInstanceExportTaskRequest(input *CreateInstanceExportTaskInput) (req *request.Request, output *CreateInstanceExportTaskOutput) { op := &request.Operation{ Name: opCreateInstanceExportTask, @@ -1116,7 +1749,28 @@ func (c *EC2) CreateInstanceExportTask(input *CreateInstanceExportTaskInput) (*C const opCreateInternetGateway = "CreateInternetGateway" -// CreateInternetGatewayRequest generates a request for the CreateInternetGateway operation. +// CreateInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInternetGatewayRequest method. +// req, resp := client.CreateInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateInternetGatewayRequest(input *CreateInternetGatewayInput) (req *request.Request, output *CreateInternetGatewayOutput) { op := &request.Operation{ Name: opCreateInternetGateway, @@ -1147,7 +1801,28 @@ func (c *EC2) CreateInternetGateway(input *CreateInternetGatewayInput) (*CreateI const opCreateKeyPair = "CreateKeyPair" -// CreateKeyPairRequest generates a request for the CreateKeyPair operation. +// CreateKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the CreateKeyPair operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateKeyPair method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateKeyPairRequest method. 
+// req, resp := client.CreateKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateKeyPairRequest(input *CreateKeyPairInput) (req *request.Request, output *CreateKeyPairOutput) { op := &request.Operation{ Name: opCreateKeyPair, @@ -1185,7 +1860,28 @@ func (c *EC2) CreateKeyPair(input *CreateKeyPairInput) (*CreateKeyPairOutput, er const opCreateNatGateway = "CreateNatGateway" -// CreateNatGatewayRequest generates a request for the CreateNatGateway operation. +// CreateNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateNatGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNatGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNatGatewayRequest method. +// req, resp := client.CreateNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateNatGatewayRequest(input *CreateNatGatewayInput) (req *request.Request, output *CreateNatGatewayOutput) { op := &request.Operation{ Name: opCreateNatGateway, @@ -1217,7 +1913,28 @@ func (c *EC2) CreateNatGateway(input *CreateNatGatewayInput) (*CreateNatGatewayO const opCreateNetworkAcl = "CreateNetworkAcl" -// CreateNetworkAclRequest generates a request for the CreateNetworkAcl operation. +// CreateNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNetworkAclRequest method. +// req, resp := client.CreateNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateNetworkAclRequest(input *CreateNetworkAclInput) (req *request.Request, output *CreateNetworkAclOutput) { op := &request.Operation{ Name: opCreateNetworkAcl, @@ -1248,7 +1965,28 @@ func (c *EC2) CreateNetworkAcl(input *CreateNetworkAclInput) (*CreateNetworkAclO const opCreateNetworkAclEntry = "CreateNetworkAclEntry" -// CreateNetworkAclEntryRequest generates a request for the CreateNetworkAclEntry operation. +// CreateNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkAclEntry operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNetworkAclEntryRequest method. +// req, resp := client.CreateNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateNetworkAclEntryRequest(input *CreateNetworkAclEntryInput) (req *request.Request, output *CreateNetworkAclEntryOutput) { op := &request.Operation{ Name: opCreateNetworkAclEntry, @@ -1293,7 +2031,28 @@ func (c *EC2) CreateNetworkAclEntry(input *CreateNetworkAclEntryInput) (*CreateN const opCreateNetworkInterface = "CreateNetworkInterface" -// CreateNetworkInterfaceRequest generates a request for the CreateNetworkInterface operation. +// CreateNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the CreateNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateNetworkInterfaceRequest method. +// req, resp := client.CreateNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateNetworkInterfaceRequest(input *CreateNetworkInterfaceInput) (req *request.Request, output *CreateNetworkInterfaceOutput) { op := &request.Operation{ Name: opCreateNetworkInterface, @@ -1324,7 +2083,28 @@ func (c *EC2) CreateNetworkInterface(input *CreateNetworkInterfaceInput) (*Creat const opCreatePlacementGroup = "CreatePlacementGroup" -// CreatePlacementGroupRequest generates a request for the CreatePlacementGroup operation. +// CreatePlacementGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlacementGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlacementGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlacementGroupRequest method. 
+// req, resp := client.CreatePlacementGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreatePlacementGroupRequest(input *CreatePlacementGroupInput) (req *request.Request, output *CreatePlacementGroupOutput) { op := &request.Operation{ Name: opCreatePlacementGroup, @@ -1358,7 +2138,28 @@ func (c *EC2) CreatePlacementGroup(input *CreatePlacementGroupInput) (*CreatePla const opCreateReservedInstancesListing = "CreateReservedInstancesListing" -// CreateReservedInstancesListingRequest generates a request for the CreateReservedInstancesListing operation. +// CreateReservedInstancesListingRequest generates a "aws/request.Request" representing the +// client's request for the CreateReservedInstancesListing operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReservedInstancesListing method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReservedInstancesListingRequest method. +// req, resp := client.CreateReservedInstancesListingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstancesListingInput) (req *request.Request, output *CreateReservedInstancesListingOutput) { op := &request.Operation{ Name: opCreateReservedInstancesListing, @@ -1404,7 +2205,28 @@ func (c *EC2) CreateReservedInstancesListing(input *CreateReservedInstancesListi const opCreateRoute = "CreateRoute" -// CreateRouteRequest generates a request for the CreateRoute operation. +// CreateRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRouteRequest method. +// req, resp := client.CreateRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, output *CreateRouteOutput) { op := &request.Operation{ Name: opCreateRoute, @@ -1432,9 +2254,9 @@ func (c *EC2) CreateRouteRequest(input *CreateRouteInput) (req *request.Request, // match. 
For example, let's say the traffic is destined for 192.0.2.3, and // the route table includes the following two routes: // -// 192.0.2.0/24 (goes to some target A) +// 192.0.2.0/24 (goes to some target A) // -// 192.0.2.0/28 (goes to some target B) +// 192.0.2.0/28 (goes to some target B) // // Both routes apply to the traffic destined for 192.0.2.3. However, the // second route in the list covers a smaller number of IP addresses and is therefore @@ -1450,7 +2272,28 @@ func (c *EC2) CreateRoute(input *CreateRouteInput) (*CreateRouteOutput, error) { const opCreateRouteTable = "CreateRouteTable" -// CreateRouteTableRequest generates a request for the CreateRouteTable operation. +// CreateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the CreateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRouteTableRequest method. +// req, resp := client.CreateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateRouteTableRequest(input *CreateRouteTableInput) (req *request.Request, output *CreateRouteTableOutput) { op := &request.Operation{ Name: opCreateRouteTable, @@ -1481,7 +2324,28 @@ func (c *EC2) CreateRouteTable(input *CreateRouteTableInput) (*CreateRouteTableO const opCreateSecurityGroup = "CreateSecurityGroup" -// CreateSecurityGroupRequest generates a request for the CreateSecurityGroup operation. +// CreateSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSecurityGroupRequest method. +// req, resp := client.CreateSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req *request.Request, output *CreateSecurityGroupOutput) { op := &request.Operation{ Name: opCreateSecurityGroup, @@ -1534,7 +2398,28 @@ func (c *EC2) CreateSecurityGroup(input *CreateSecurityGroupInput) (*CreateSecur const opCreateSnapshot = "CreateSnapshot" -// CreateSnapshotRequest generates a request for the CreateSnapshot operation. 
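[Editor's note, not part of the vendored diff] The longest-prefix rule described above can be made concrete with two CreateRoute calls against one route table: both routes cover 192.0.2.3, but the /28 route wins because it is the more specific match. IDs below are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	rtb := aws.String("rtb-0123456789abcdef0") // placeholder route table

	// Broader route: 192.0.2.0/24 -> an Internet gateway (target A).
	if _, err := svc.CreateRoute(&ec2.CreateRouteInput{
		RouteTableId:         rtb,
		DestinationCidrBlock: aws.String("192.0.2.0/24"),
		GatewayId:            aws.String("igw-0123456789abcdef0"), // placeholder
	}); err != nil {
		fmt.Println("CreateRoute /24 failed:", err)
		return
	}

	// More specific route: 192.0.2.0/28 -> a network interface (target B).
	// Traffic to 192.0.2.3 matches both routes but follows this one,
	// because /28 is the longest matching prefix.
	if _, err := svc.CreateRoute(&ec2.CreateRouteInput{
		RouteTableId:         rtb,
		DestinationCidrBlock: aws.String("192.0.2.0/28"),
		NetworkInterfaceId:   aws.String("eni-0123456789abcdef0"), // placeholder
	}); err != nil {
		fmt.Println("CreateRoute /28 failed:", err)
	}
}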
+// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotRequest method. +// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *Snapshot) { op := &request.Operation{ Name: opCreateSnapshot, @@ -1588,7 +2473,28 @@ func (c *EC2) CreateSnapshot(input *CreateSnapshotInput) (*Snapshot, error) { const opCreateSpotDatafeedSubscription = "CreateSpotDatafeedSubscription" -// CreateSpotDatafeedSubscriptionRequest generates a request for the CreateSpotDatafeedSubscription operation. +// CreateSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateSpotDatafeedSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSpotDatafeedSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSpotDatafeedSubscriptionRequest method. +// req, resp := client.CreateSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateSpotDatafeedSubscriptionRequest(input *CreateSpotDatafeedSubscriptionInput) (req *request.Request, output *CreateSpotDatafeedSubscriptionOutput) { op := &request.Operation{ Name: opCreateSpotDatafeedSubscription, @@ -1618,7 +2524,28 @@ func (c *EC2) CreateSpotDatafeedSubscription(input *CreateSpotDatafeedSubscripti const opCreateSubnet = "CreateSubnet" -// CreateSubnetRequest generates a request for the CreateSubnet operation. +// CreateSubnetRequest generates a "aws/request.Request" representing the +// client's request for the CreateSubnet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSubnet method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSubnetRequest method. +// req, resp := client.CreateSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateSubnetRequest(input *CreateSubnetInput) (req *request.Request, output *CreateSubnetOutput) { op := &request.Operation{ Name: opCreateSubnet, @@ -1669,7 +2596,28 @@ func (c *EC2) CreateSubnet(input *CreateSubnetInput) (*CreateSubnetOutput, error const opCreateTags = "CreateTags" -// CreateTagsRequest generates a request for the CreateTags operation. +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { op := &request.Operation{ Name: opCreateTags, @@ -1706,7 +2654,28 @@ func (c *EC2) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { const opCreateVolume = "CreateVolume" -// CreateVolumeRequest generates a request for the CreateVolume operation. +// CreateVolumeRequest generates a "aws/request.Request" representing the +// client's request for the CreateVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVolumeRequest method. +// req, resp := client.CreateVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVolumeRequest(input *CreateVolumeInput) (req *request.Request, output *Volume) { op := &request.Operation{ Name: opCreateVolume, @@ -1748,7 +2717,28 @@ func (c *EC2) CreateVolume(input *CreateVolumeInput) (*Volume, error) { const opCreateVpc = "CreateVpc" -// CreateVpcRequest generates a request for the CreateVpc operation. +// CreateVpcRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
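[Editor's note, not part of the vendored diff] The comments above point out that callers who only need the service response can call the operation method directly instead of building a request object. A minimal sketch of that direct path using CreateTags, which applies one tag set to several resources in a single call; the resource IDs are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.CreateTags(&ec2.CreateTagsInput{
		// One call can tag several resources, e.g. an instance and its volume.
		Resources: []*string{
			aws.String("i-0123456789abcdef0"),   // placeholder
			aws.String("vol-0123456789abcdef0"), // placeholder
		},
		Tags: []*ec2.Tag{
			{Key: aws.String("Name"), Value: aws.String("web-1")},
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		fmt.Println("CreateTags failed:", err)
		return
	}
	fmt.Println("tags applied")
}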
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpcRequest method. +// req, resp := client.CreateVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpcRequest(input *CreateVpcInput) (req *request.Request, output *CreateVpcOutput) { op := &request.Operation{ Name: opCreateVpc, @@ -1790,7 +2780,28 @@ func (c *EC2) CreateVpc(input *CreateVpcInput) (*CreateVpcOutput, error) { const opCreateVpcEndpoint = "CreateVpcEndpoint" -// CreateVpcEndpointRequest generates a request for the CreateVpcEndpoint operation. +// CreateVpcEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpcEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpcEndpointRequest method. +// req, resp := client.CreateVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpcEndpointRequest(input *CreateVpcEndpointInput) (req *request.Request, output *CreateVpcEndpointOutput) { op := &request.Operation{ Name: opCreateVpcEndpoint, @@ -1823,7 +2834,28 @@ func (c *EC2) CreateVpcEndpoint(input *CreateVpcEndpointInput) (*CreateVpcEndpoi const opCreateVpcPeeringConnection = "CreateVpcPeeringConnection" -// CreateVpcPeeringConnectionRequest generates a request for the CreateVpcPeeringConnection operation. +// CreateVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpcPeeringConnectionRequest method. 
+// req, resp := client.CreateVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpcPeeringConnectionRequest(input *CreateVpcPeeringConnectionInput) (req *request.Request, output *CreateVpcPeeringConnectionOutput) { op := &request.Operation{ Name: opCreateVpcPeeringConnection, @@ -1860,7 +2892,28 @@ func (c *EC2) CreateVpcPeeringConnection(input *CreateVpcPeeringConnectionInput) const opCreateVpnConnection = "CreateVpnConnection" -// CreateVpnConnectionRequest generates a request for the CreateVpnConnection operation. +// CreateVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpnConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpnConnectionRequest method. +// req, resp := client.CreateVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpnConnectionRequest(input *CreateVpnConnectionInput) (req *request.Request, output *CreateVpnConnectionOutput) { op := &request.Operation{ Name: opCreateVpnConnection, @@ -1906,7 +2959,28 @@ func (c *EC2) CreateVpnConnection(input *CreateVpnConnectionInput) (*CreateVpnCo const opCreateVpnConnectionRoute = "CreateVpnConnectionRoute" -// CreateVpnConnectionRouteRequest generates a request for the CreateVpnConnectionRoute operation. +// CreateVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnConnectionRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpnConnectionRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpnConnectionRouteRequest method. +// req, resp := client.CreateVpnConnectionRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpnConnectionRouteRequest(input *CreateVpnConnectionRouteInput) (req *request.Request, output *CreateVpnConnectionRouteOutput) { op := &request.Operation{ Name: opCreateVpnConnectionRoute, @@ -1942,7 +3016,28 @@ func (c *EC2) CreateVpnConnectionRoute(input *CreateVpnConnectionRouteInput) (*C const opCreateVpnGateway = "CreateVpnGateway" -// CreateVpnGatewayRequest generates a request for the CreateVpnGateway operation. 
+// CreateVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the CreateVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVpnGatewayRequest method. +// req, resp := client.CreateVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) CreateVpnGatewayRequest(input *CreateVpnGatewayInput) (req *request.Request, output *CreateVpnGatewayOutput) { op := &request.Operation{ Name: opCreateVpnGateway, @@ -1975,7 +3070,28 @@ func (c *EC2) CreateVpnGateway(input *CreateVpnGatewayInput) (*CreateVpnGatewayO const opDeleteCustomerGateway = "DeleteCustomerGateway" -// DeleteCustomerGatewayRequest generates a request for the DeleteCustomerGateway operation. +// DeleteCustomerGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCustomerGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCustomerGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCustomerGatewayRequest method. +// req, resp := client.DeleteCustomerGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteCustomerGatewayRequest(input *DeleteCustomerGatewayInput) (req *request.Request, output *DeleteCustomerGatewayOutput) { op := &request.Operation{ Name: opDeleteCustomerGateway, @@ -2005,7 +3121,28 @@ func (c *EC2) DeleteCustomerGateway(input *DeleteCustomerGatewayInput) (*DeleteC const opDeleteDhcpOptions = "DeleteDhcpOptions" -// DeleteDhcpOptionsRequest generates a request for the DeleteDhcpOptions operation. +// DeleteDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteDhcpOptionsRequest method. +// req, resp := client.DeleteDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteDhcpOptionsRequest(input *DeleteDhcpOptionsInput) (req *request.Request, output *DeleteDhcpOptionsOutput) { op := &request.Operation{ Name: opDeleteDhcpOptions, @@ -2037,7 +3174,28 @@ func (c *EC2) DeleteDhcpOptions(input *DeleteDhcpOptionsInput) (*DeleteDhcpOptio const opDeleteFlowLogs = "DeleteFlowLogs" -// DeleteFlowLogsRequest generates a request for the DeleteFlowLogs operation. +// DeleteFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFlowLogsRequest method. +// req, resp := client.DeleteFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteFlowLogsRequest(input *DeleteFlowLogsInput) (req *request.Request, output *DeleteFlowLogsOutput) { op := &request.Operation{ Name: opDeleteFlowLogs, @@ -2064,7 +3222,28 @@ func (c *EC2) DeleteFlowLogs(input *DeleteFlowLogsInput) (*DeleteFlowLogsOutput, const opDeleteInternetGateway = "DeleteInternetGateway" -// DeleteInternetGatewayRequest generates a request for the DeleteInternetGateway operation. +// DeleteInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInternetGatewayRequest method. +// req, resp := client.DeleteInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteInternetGatewayRequest(input *DeleteInternetGatewayInput) (req *request.Request, output *DeleteInternetGatewayOutput) { op := &request.Operation{ Name: opDeleteInternetGateway, @@ -2094,7 +3273,28 @@ func (c *EC2) DeleteInternetGateway(input *DeleteInternetGatewayInput) (*DeleteI const opDeleteKeyPair = "DeleteKeyPair" -// DeleteKeyPairRequest generates a request for the DeleteKeyPair operation. +// DeleteKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the DeleteKeyPair operation. 
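These comments also note that callers who only need the service response should call the operation method directly instead of going through the request object. A small sketch of the two equivalent forms for DeleteInternetGateway, assuming a placeholder region and gateway ID:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// deleteDirect uses the one-shot wrapper: it builds and sends the
// request in a single call and returns only the final result.
func deleteDirect(svc *ec2.EC2, id string) error {
	_, err := svc.DeleteInternetGateway(&ec2.DeleteInternetGatewayInput{
		InternetGatewayId: aws.String(id),
	})
	return err
}

// deleteViaRequest uses the two-step form: the request object could be
// inspected or given extra handlers before Send executes it.
func deleteViaRequest(svc *ec2.EC2, id string) error {
	req, _ := svc.DeleteInternetGatewayRequest(&ec2.DeleteInternetGatewayInput{
		InternetGatewayId: aws.String(id),
	})
	return req.Send()
}

func main() {
	// Region and gateway ID are placeholders.
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	if err := deleteDirect(svc, "igw-0abc1234def567890"); err != nil {
		log.Fatal(err)
	}
}
```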
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteKeyPair method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteKeyPairRequest method. +// req, resp := client.DeleteKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteKeyPairRequest(input *DeleteKeyPairInput) (req *request.Request, output *DeleteKeyPairOutput) { op := &request.Operation{ Name: opDeleteKeyPair, @@ -2123,7 +3323,28 @@ func (c *EC2) DeleteKeyPair(input *DeleteKeyPairInput) (*DeleteKeyPairOutput, er const opDeleteNatGateway = "DeleteNatGateway" -// DeleteNatGatewayRequest generates a request for the DeleteNatGateway operation. +// DeleteNatGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNatGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNatGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNatGatewayRequest method. +// req, resp := client.DeleteNatGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteNatGatewayRequest(input *DeleteNatGatewayInput) (req *request.Request, output *DeleteNatGatewayOutput) { op := &request.Operation{ Name: opDeleteNatGateway, @@ -2152,7 +3373,28 @@ func (c *EC2) DeleteNatGateway(input *DeleteNatGatewayInput) (*DeleteNatGatewayO const opDeleteNetworkAcl = "DeleteNetworkAcl" -// DeleteNetworkAclRequest generates a request for the DeleteNetworkAcl operation. +// DeleteNetworkAclRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkAclRequest method. 
+// req, resp := client.DeleteNetworkAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteNetworkAclRequest(input *DeleteNetworkAclInput) (req *request.Request, output *DeleteNetworkAclOutput) { op := &request.Operation{ Name: opDeleteNetworkAcl, @@ -2182,7 +3424,28 @@ func (c *EC2) DeleteNetworkAcl(input *DeleteNetworkAclInput) (*DeleteNetworkAclO const opDeleteNetworkAclEntry = "DeleteNetworkAclEntry" -// DeleteNetworkAclEntryRequest generates a request for the DeleteNetworkAclEntry operation. +// DeleteNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkAclEntry operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkAclEntryRequest method. +// req, resp := client.DeleteNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteNetworkAclEntryRequest(input *DeleteNetworkAclEntryInput) (req *request.Request, output *DeleteNetworkAclEntryOutput) { op := &request.Operation{ Name: opDeleteNetworkAclEntry, @@ -2212,7 +3475,28 @@ func (c *EC2) DeleteNetworkAclEntry(input *DeleteNetworkAclEntryInput) (*DeleteN const opDeleteNetworkInterface = "DeleteNetworkInterface" -// DeleteNetworkInterfaceRequest generates a request for the DeleteNetworkInterface operation. +// DeleteNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteNetworkInterfaceRequest method. +// req, resp := client.DeleteNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteNetworkInterfaceRequest(input *DeleteNetworkInterfaceInput) (req *request.Request, output *DeleteNetworkInterfaceOutput) { op := &request.Operation{ Name: opDeleteNetworkInterface, @@ -2242,7 +3526,28 @@ func (c *EC2) DeleteNetworkInterface(input *DeleteNetworkInterfaceInput) (*Delet const opDeletePlacementGroup = "DeletePlacementGroup" -// DeletePlacementGroupRequest generates a request for the DeletePlacementGroup operation. 
+// DeletePlacementGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeletePlacementGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePlacementGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePlacementGroupRequest method. +// req, resp := client.DeletePlacementGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeletePlacementGroupRequest(input *DeletePlacementGroupInput) (req *request.Request, output *DeletePlacementGroupOutput) { op := &request.Operation{ Name: opDeletePlacementGroup, @@ -2274,7 +3579,28 @@ func (c *EC2) DeletePlacementGroup(input *DeletePlacementGroupInput) (*DeletePla const opDeleteRoute = "DeleteRoute" -// DeleteRouteRequest generates a request for the DeleteRoute operation. +// DeleteRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRouteRequest method. +// req, resp := client.DeleteRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteRouteRequest(input *DeleteRouteInput) (req *request.Request, output *DeleteRouteOutput) { op := &request.Operation{ Name: opDeleteRoute, @@ -2303,7 +3629,28 @@ func (c *EC2) DeleteRoute(input *DeleteRouteInput) (*DeleteRouteOutput, error) { const opDeleteRouteTable = "DeleteRouteTable" -// DeleteRouteTableRequest generates a request for the DeleteRouteTable operation. +// DeleteRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRouteTableRequest method. 
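The comments further mention accessing properties on the request object after sending. A sketch that reads the HTTP status and the service-assigned request ID off the request once Send returns, using DeleteRouteRequest; the region, route table ID, and CIDR block are placeholders:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Region, route table ID, and CIDR are placeholders.
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	req, _ := svc.DeleteRouteRequest(&ec2.DeleteRouteInput{
		RouteTableId:         aws.String("rtb-0abc1234def567890"),
		DestinationCidrBlock: aws.String("10.0.2.0/24"),
	})

	err := req.Send()

	// After Send, the request object exposes the raw exchange, for
	// example the HTTP status and the request ID assigned by EC2.
	if req.HTTPResponse != nil {
		log.Printf("HTTP status: %s, request ID: %s",
			req.HTTPResponse.Status, req.RequestID)
	}
	if err != nil {
		log.Fatal(err)
	}
}
```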
+// req, resp := client.DeleteRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteRouteTableRequest(input *DeleteRouteTableInput) (req *request.Request, output *DeleteRouteTableOutput) { op := &request.Operation{ Name: opDeleteRouteTable, @@ -2334,7 +3681,28 @@ func (c *EC2) DeleteRouteTable(input *DeleteRouteTableInput) (*DeleteRouteTableO const opDeleteSecurityGroup = "DeleteSecurityGroup" -// DeleteSecurityGroupRequest generates a request for the DeleteSecurityGroup operation. +// DeleteSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSecurityGroupRequest method. +// req, resp := client.DeleteSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteSecurityGroupRequest(input *DeleteSecurityGroupInput) (req *request.Request, output *DeleteSecurityGroupOutput) { op := &request.Operation{ Name: opDeleteSecurityGroup, @@ -2367,7 +3735,28 @@ func (c *EC2) DeleteSecurityGroup(input *DeleteSecurityGroupInput) (*DeleteSecur const opDeleteSnapshot = "DeleteSnapshot" -// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. +// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { op := &request.Operation{ Name: opDeleteSnapshot, @@ -2410,7 +3799,28 @@ func (c *EC2) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapshotOutput, const opDeleteSpotDatafeedSubscription = "DeleteSpotDatafeedSubscription" -// DeleteSpotDatafeedSubscriptionRequest generates a request for the DeleteSpotDatafeedSubscription operation. +// DeleteSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSpotDatafeedSubscription operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSpotDatafeedSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSpotDatafeedSubscriptionRequest method. +// req, resp := client.DeleteSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteSpotDatafeedSubscriptionRequest(input *DeleteSpotDatafeedSubscriptionInput) (req *request.Request, output *DeleteSpotDatafeedSubscriptionOutput) { op := &request.Operation{ Name: opDeleteSpotDatafeedSubscription, @@ -2439,7 +3849,28 @@ func (c *EC2) DeleteSpotDatafeedSubscription(input *DeleteSpotDatafeedSubscripti const opDeleteSubnet = "DeleteSubnet" -// DeleteSubnetRequest generates a request for the DeleteSubnet operation. +// DeleteSubnetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSubnet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSubnet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSubnetRequest method. +// req, resp := client.DeleteSubnetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteSubnetRequest(input *DeleteSubnetInput) (req *request.Request, output *DeleteSubnetOutput) { op := &request.Operation{ Name: opDeleteSubnet, @@ -2469,7 +3900,28 @@ func (c *EC2) DeleteSubnet(input *DeleteSubnetInput) (*DeleteSubnetOutput, error const opDeleteTags = "DeleteTags" -// DeleteTagsRequest generates a request for the DeleteTags operation. +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. 
+// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { op := &request.Operation{ Name: opDeleteTags, @@ -2502,7 +3954,28 @@ func (c *EC2) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { const opDeleteVolume = "DeleteVolume" -// DeleteVolumeRequest generates a request for the DeleteVolume operation. +// DeleteVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVolumeRequest method. +// req, resp := client.DeleteVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVolumeRequest(input *DeleteVolumeInput) (req *request.Request, output *DeleteVolumeOutput) { op := &request.Operation{ Name: opDeleteVolume, @@ -2537,7 +4010,28 @@ func (c *EC2) DeleteVolume(input *DeleteVolumeInput) (*DeleteVolumeOutput, error const opDeleteVpc = "DeleteVpc" -// DeleteVpcRequest generates a request for the DeleteVpc operation. +// DeleteVpcRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcRequest method. +// req, resp := client.DeleteVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpcRequest(input *DeleteVpcInput) (req *request.Request, output *DeleteVpcOutput) { op := &request.Operation{ Name: opDeleteVpc, @@ -2570,7 +4064,28 @@ func (c *EC2) DeleteVpc(input *DeleteVpcInput) (*DeleteVpcOutput, error) { const opDeleteVpcEndpoints = "DeleteVpcEndpoints" -// DeleteVpcEndpointsRequest generates a request for the DeleteVpcEndpoints operation. +// DeleteVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcEndpoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpcEndpoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcEndpointsRequest method. +// req, resp := client.DeleteVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpcEndpointsRequest(input *DeleteVpcEndpointsInput) (req *request.Request, output *DeleteVpcEndpointsOutput) { op := &request.Operation{ Name: opDeleteVpcEndpoints, @@ -2598,7 +4113,28 @@ func (c *EC2) DeleteVpcEndpoints(input *DeleteVpcEndpointsInput) (*DeleteVpcEndp const opDeleteVpcPeeringConnection = "DeleteVpcPeeringConnection" -// DeleteVpcPeeringConnectionRequest generates a request for the DeleteVpcPeeringConnection operation. +// DeleteVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpcPeeringConnectionRequest method. +// req, resp := client.DeleteVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpcPeeringConnectionRequest(input *DeleteVpcPeeringConnectionInput) (req *request.Request, output *DeleteVpcPeeringConnectionOutput) { op := &request.Operation{ Name: opDeleteVpcPeeringConnection, @@ -2628,7 +4164,28 @@ func (c *EC2) DeleteVpcPeeringConnection(input *DeleteVpcPeeringConnectionInput) const opDeleteVpnConnection = "DeleteVpnConnection" -// DeleteVpnConnectionRequest generates a request for the DeleteVpnConnection operation. +// DeleteVpnConnectionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnConnectionRequest method. 
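DeleteVpcEndpoints, documented above, accepts several endpoint IDs in one call; in the vendored SDK its output carries an Unsuccessful list for endpoints that could not be deleted, rather than surfacing those as an error. A hedged sketch with placeholder region and endpoint IDs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Region and endpoint IDs are placeholders.
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	resp, err := svc.DeleteVpcEndpoints(&ec2.DeleteVpcEndpointsInput{
		VpcEndpointIds: []*string{
			aws.String("vpce-0abc1234def567890"),
			aws.String("vpce-0fed9876cba543210"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// The call can partially succeed; endpoints that could not be
	// deleted are reported per item instead of failing the whole call.
	for _, item := range resp.Unsuccessful {
		msg := ""
		if item.Error != nil {
			msg = aws.StringValue(item.Error.Message)
		}
		fmt.Printf("could not delete %s: %s\n", aws.StringValue(item.ResourceId), msg)
	}
}
```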
+// req, resp := client.DeleteVpnConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpnConnectionRequest(input *DeleteVpnConnectionInput) (req *request.Request, output *DeleteVpnConnectionOutput) { op := &request.Operation{ Name: opDeleteVpnConnection, @@ -2666,7 +4223,28 @@ func (c *EC2) DeleteVpnConnection(input *DeleteVpnConnectionInput) (*DeleteVpnCo const opDeleteVpnConnectionRoute = "DeleteVpnConnectionRoute" -// DeleteVpnConnectionRouteRequest generates a request for the DeleteVpnConnectionRoute operation. +// DeleteVpnConnectionRouteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnConnectionRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnConnectionRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnConnectionRouteRequest method. +// req, resp := client.DeleteVpnConnectionRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpnConnectionRouteRequest(input *DeleteVpnConnectionRouteInput) (req *request.Request, output *DeleteVpnConnectionRouteOutput) { op := &request.Operation{ Name: opDeleteVpnConnectionRoute, @@ -2698,7 +4276,28 @@ func (c *EC2) DeleteVpnConnectionRoute(input *DeleteVpnConnectionRouteInput) (*D const opDeleteVpnGateway = "DeleteVpnGateway" -// DeleteVpnGatewayRequest generates a request for the DeleteVpnGateway operation. +// DeleteVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVpnGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVpnGatewayRequest method. +// req, resp := client.DeleteVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeleteVpnGatewayRequest(input *DeleteVpnGatewayInput) (req *request.Request, output *DeleteVpnGatewayOutput) { op := &request.Operation{ Name: opDeleteVpnGateway, @@ -2731,7 +4330,28 @@ func (c *EC2) DeleteVpnGateway(input *DeleteVpnGatewayInput) (*DeleteVpnGatewayO const opDeregisterImage = "DeregisterImage" -// DeregisterImageRequest generates a request for the DeregisterImage operation. 
+// DeregisterImageRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterImageRequest method. +// req, resp := client.DeregisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DeregisterImageRequest(input *DeregisterImageInput) (req *request.Request, output *DeregisterImageOutput) { op := &request.Operation{ Name: opDeregisterImage, @@ -2763,7 +4383,28 @@ func (c *EC2) DeregisterImage(input *DeregisterImageInput) (*DeregisterImageOutp const opDescribeAccountAttributes = "DescribeAccountAttributes" -// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { op := &request.Operation{ Name: opDescribeAccountAttributes, @@ -2784,22 +4425,22 @@ func (c *EC2) DescribeAccountAttributesRequest(input *DescribeAccountAttributesI // Describes attributes of your AWS account. The following are the supported // account attributes: // -// supported-platforms: Indicates whether your account can launch instances +// supported-platforms: Indicates whether your account can launch instances // into EC2-Classic and EC2-VPC, or only into EC2-VPC. // -// default-vpc: The ID of the default VPC for your account, or none. +// default-vpc: The ID of the default VPC for your account, or none. // -// max-instances: The maximum number of On-Demand instances that you can +// max-instances: The maximum number of On-Demand instances that you can // run. // -// vpc-max-security-groups-per-interface: The maximum number of security +// vpc-max-security-groups-per-interface: The maximum number of security // groups that you can assign to a network interface. 
// -// max-elastic-ips: The maximum number of Elastic IP addresses that you can -// allocate for use with EC2-Classic. +// max-elastic-ips: The maximum number of Elastic IP addresses that you +// can allocate for use with EC2-Classic. // -// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that you -// can allocate for use with EC2-VPC. +// vpc-max-elastic-ips: The maximum number of Elastic IP addresses that +// you can allocate for use with EC2-VPC. func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) (*DescribeAccountAttributesOutput, error) { req, out := c.DescribeAccountAttributesRequest(input) err := req.Send() @@ -2808,7 +4449,28 @@ func (c *EC2) DescribeAccountAttributes(input *DescribeAccountAttributesInput) ( const opDescribeAddresses = "DescribeAddresses" -// DescribeAddressesRequest generates a request for the DescribeAddresses operation. +// DescribeAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAddressesRequest method. +// req, resp := client.DescribeAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeAddressesRequest(input *DescribeAddressesInput) (req *request.Request, output *DescribeAddressesOutput) { op := &request.Operation{ Name: opDescribeAddresses, @@ -2839,7 +4501,28 @@ func (c *EC2) DescribeAddresses(input *DescribeAddressesInput) (*DescribeAddress const opDescribeAvailabilityZones = "DescribeAvailabilityZones" -// DescribeAvailabilityZonesRequest generates a request for the DescribeAvailabilityZones operation. +// DescribeAvailabilityZonesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAvailabilityZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAvailabilityZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAvailabilityZonesRequest method. 
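The attribute list above maps directly onto DescribeAccountAttributes. A small sketch that requests two of the documented attributes and prints their values; the region is a placeholder, and omitting AttributeNames would return all supported attributes:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Region is a placeholder.
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Ask only for the attributes we care about.
	resp, err := svc.DescribeAccountAttributes(&ec2.DescribeAccountAttributesInput{
		AttributeNames: []*string{
			aws.String("supported-platforms"),
			aws.String("default-vpc"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	// Each attribute can carry several values.
	for _, attr := range resp.AccountAttributes {
		for _, v := range attr.AttributeValues {
			fmt.Printf("%s = %s\n",
				aws.StringValue(attr.AttributeName), aws.StringValue(v.AttributeValue))
		}
	}
}
```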
+// req, resp := client.DescribeAvailabilityZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeAvailabilityZonesRequest(input *DescribeAvailabilityZonesInput) (req *request.Request, output *DescribeAvailabilityZonesOutput) { op := &request.Operation{ Name: opDescribeAvailabilityZones, @@ -2872,7 +4555,28 @@ func (c *EC2) DescribeAvailabilityZones(input *DescribeAvailabilityZonesInput) ( const opDescribeBundleTasks = "DescribeBundleTasks" -// DescribeBundleTasksRequest generates a request for the DescribeBundleTasks operation. +// DescribeBundleTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeBundleTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeBundleTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeBundleTasksRequest method. +// req, resp := client.DescribeBundleTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req *request.Request, output *DescribeBundleTasksOutput) { op := &request.Operation{ Name: opDescribeBundleTasks, @@ -2892,7 +4596,7 @@ func (c *EC2) DescribeBundleTasksRequest(input *DescribeBundleTasksInput) (req * // Describes one or more of your bundling tasks. // -// Completed bundle tasks are listed for only a limited time. If your bundle +// Completed bundle tasks are listed for only a limited time. If your bundle // task is no longer in the list, you can still register an AMI from it. Just // use RegisterImage with the Amazon S3 bucket name and image manifest name // you provided to the bundle task. @@ -2904,7 +4608,28 @@ func (c *EC2) DescribeBundleTasks(input *DescribeBundleTasksInput) (*DescribeBun const opDescribeClassicLinkInstances = "DescribeClassicLinkInstances" -// DescribeClassicLinkInstancesRequest generates a request for the DescribeClassicLinkInstances operation. +// DescribeClassicLinkInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClassicLinkInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClassicLinkInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClassicLinkInstancesRequest method. 
+// req, resp := client.DescribeClassicLinkInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeClassicLinkInstancesRequest(input *DescribeClassicLinkInstancesInput) (req *request.Request, output *DescribeClassicLinkInstancesOutput) { op := &request.Operation{ Name: opDescribeClassicLinkInstances, @@ -2934,7 +4659,28 @@ func (c *EC2) DescribeClassicLinkInstances(input *DescribeClassicLinkInstancesIn const opDescribeConversionTasks = "DescribeConversionTasks" -// DescribeConversionTasksRequest generates a request for the DescribeConversionTasks operation. +// DescribeConversionTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConversionTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConversionTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConversionTasksRequest method. +// req, resp := client.DescribeConversionTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeConversionTasksRequest(input *DescribeConversionTasksInput) (req *request.Request, output *DescribeConversionTasksOutput) { op := &request.Operation{ Name: opDescribeConversionTasks, @@ -2967,7 +4713,28 @@ func (c *EC2) DescribeConversionTasks(input *DescribeConversionTasksInput) (*Des const opDescribeCustomerGateways = "DescribeCustomerGateways" -// DescribeCustomerGatewaysRequest generates a request for the DescribeCustomerGateways operation. +// DescribeCustomerGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCustomerGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCustomerGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCustomerGatewaysRequest method. 
+// req, resp := client.DescribeCustomerGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeCustomerGatewaysRequest(input *DescribeCustomerGatewaysInput) (req *request.Request, output *DescribeCustomerGatewaysOutput) { op := &request.Operation{ Name: opDescribeCustomerGateways, @@ -2998,7 +4765,28 @@ func (c *EC2) DescribeCustomerGateways(input *DescribeCustomerGatewaysInput) (*D const opDescribeDhcpOptions = "DescribeDhcpOptions" -// DescribeDhcpOptionsRequest generates a request for the DescribeDhcpOptions operation. +// DescribeDhcpOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDhcpOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDhcpOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDhcpOptionsRequest method. +// req, resp := client.DescribeDhcpOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeDhcpOptionsRequest(input *DescribeDhcpOptionsInput) (req *request.Request, output *DescribeDhcpOptionsOutput) { op := &request.Operation{ Name: opDescribeDhcpOptions, @@ -3028,7 +4816,28 @@ func (c *EC2) DescribeDhcpOptions(input *DescribeDhcpOptionsInput) (*DescribeDhc const opDescribeExportTasks = "DescribeExportTasks" -// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. +// DescribeExportTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeExportTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeExportTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeExportTasksRequest method. +// req, resp := client.DescribeExportTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { op := &request.Operation{ Name: opDescribeExportTasks, @@ -3055,7 +4864,28 @@ func (c *EC2) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExp const opDescribeFlowLogs = "DescribeFlowLogs" -// DescribeFlowLogsRequest generates a request for the DescribeFlowLogs operation. 
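The Describe* inputs in this SDK also accept Filters to narrow results server-side, although the comments above do not show that. An illustrative sketch that filters DHCP options sets by their Name tag; the region and tag value are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Region and tag value are placeholders.
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Filters are applied by the service, so only matching options sets
	// come back in the response.
	resp, err := svc.DescribeDhcpOptions(&ec2.DescribeDhcpOptionsInput{
		Filters: []*ec2.Filter{
			{
				Name:   aws.String("tag:Name"),
				Values: []*string{aws.String("example")},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, opts := range resp.DhcpOptions {
		fmt.Println(aws.StringValue(opts.DhcpOptionsId))
	}
}
```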
+// DescribeFlowLogsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFlowLogs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFlowLogs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFlowLogsRequest method. +// req, resp := client.DescribeFlowLogsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeFlowLogsRequest(input *DescribeFlowLogsInput) (req *request.Request, output *DescribeFlowLogsOutput) { op := &request.Operation{ Name: opDescribeFlowLogs, @@ -3084,7 +4914,28 @@ func (c *EC2) DescribeFlowLogs(input *DescribeFlowLogsInput) (*DescribeFlowLogsO const opDescribeHosts = "DescribeHosts" -// DescribeHostsRequest generates a request for the DescribeHosts operation. +// DescribeHostsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHostsRequest method. +// req, resp := client.DescribeHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeHostsRequest(input *DescribeHostsInput) (req *request.Request, output *DescribeHostsOutput) { op := &request.Operation{ Name: opDescribeHosts, @@ -3115,7 +4966,28 @@ func (c *EC2) DescribeHosts(input *DescribeHostsInput) (*DescribeHostsOutput, er const opDescribeIdFormat = "DescribeIdFormat" -// DescribeIdFormatRequest generates a request for the DescribeIdFormat operation. +// DescribeIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdFormatRequest method. 
+// req, resp := client.DescribeIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *request.Request, output *DescribeIdFormatOutput) { op := &request.Operation{ Name: opDescribeIdFormat, @@ -3154,9 +5026,89 @@ func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatO return out, err } +const opDescribeIdentityIdFormat = "DescribeIdentityIdFormat" + +// DescribeIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the DescribeIdentityIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeIdentityIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeIdentityIdFormatRequest method. +// req, resp := client.DescribeIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeIdentityIdFormatRequest(input *DescribeIdentityIdFormatInput) (req *request.Request, output *DescribeIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opDescribeIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeIdentityIdFormatInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeIdentityIdFormatOutput{} + req.Data = output + return +} + +// Describes the ID format settings for resources for the specified IAM user, +// IAM role, or root user. For example, you can view the resource types that +// are enabled for longer IDs. This request only returns information about resource +// types whose ID formats can be modified; it does not return information about +// other resource types. For more information, see Resource IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// The following resource types support longer IDs: instance | reservation +// | snapshot | volume. +// +// These settings apply to the principal specified in the request. They do +// not apply to the principal that makes the request. +func (c *EC2) DescribeIdentityIdFormat(input *DescribeIdentityIdFormatInput) (*DescribeIdentityIdFormatOutput, error) { + req, out := c.DescribeIdentityIdFormatRequest(input) + err := req.Send() + return out, err +} + const opDescribeImageAttribute = "DescribeImageAttribute" -// DescribeImageAttributeRequest generates a request for the DescribeImageAttribute operation. +// DescribeImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImageAttributeRequest method. +// req, resp := client.DescribeImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeImageAttributeRequest(input *DescribeImageAttributeInput) (req *request.Request, output *DescribeImageAttributeOutput) { op := &request.Operation{ Name: opDescribeImageAttribute, @@ -3184,7 +5136,28 @@ func (c *EC2) DescribeImageAttribute(input *DescribeImageAttributeInput) (*Descr const opDescribeImages = "DescribeImages" -// DescribeImagesRequest generates a request for the DescribeImages operation. +// DescribeImagesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImagesRequest method. +// req, resp := client.DescribeImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Request, output *DescribeImagesOutput) { op := &request.Operation{ Name: opDescribeImages, @@ -3207,7 +5180,7 @@ func (c *EC2) DescribeImagesRequest(input *DescribeImagesInput) (req *request.Re // and private images owned by other AWS accounts but for which you have explicit // launch permissions. // -// Deregistered images are included in the returned results for an unspecified +// Deregistered images are included in the returned results for an unspecified // interval after deregistration. func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, error) { req, out := c.DescribeImagesRequest(input) @@ -3217,7 +5190,28 @@ func (c *EC2) DescribeImages(input *DescribeImagesInput) (*DescribeImagesOutput, const opDescribeImportImageTasks = "DescribeImportImageTasks" -// DescribeImportImageTasksRequest generates a request for the DescribeImportImageTasks operation. +// DescribeImportImageTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImportImageTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImportImageTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImportImageTasksRequest method. +// req, resp := client.DescribeImportImageTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeImportImageTasksRequest(input *DescribeImportImageTasksInput) (req *request.Request, output *DescribeImportImageTasksOutput) { op := &request.Operation{ Name: opDescribeImportImageTasks, @@ -3245,7 +5239,28 @@ func (c *EC2) DescribeImportImageTasks(input *DescribeImportImageTasksInput) (*D const opDescribeImportSnapshotTasks = "DescribeImportSnapshotTasks" -// DescribeImportSnapshotTasksRequest generates a request for the DescribeImportSnapshotTasks operation. +// DescribeImportSnapshotTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeImportSnapshotTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeImportSnapshotTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeImportSnapshotTasksRequest method. +// req, resp := client.DescribeImportSnapshotTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeImportSnapshotTasksRequest(input *DescribeImportSnapshotTasksInput) (req *request.Request, output *DescribeImportSnapshotTasksOutput) { op := &request.Operation{ Name: opDescribeImportSnapshotTasks, @@ -3272,7 +5287,28 @@ func (c *EC2) DescribeImportSnapshotTasks(input *DescribeImportSnapshotTasksInpu const opDescribeInstanceAttribute = "DescribeInstanceAttribute" -// DescribeInstanceAttributeRequest generates a request for the DescribeInstanceAttribute operation. +// DescribeInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeInstanceAttributeRequest method. +// req, resp := client.DescribeInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeInstanceAttributeRequest(input *DescribeInstanceAttributeInput) (req *request.Request, output *DescribeInstanceAttributeOutput) { op := &request.Operation{ Name: opDescribeInstanceAttribute, @@ -3303,7 +5339,28 @@ func (c *EC2) DescribeInstanceAttribute(input *DescribeInstanceAttributeInput) ( const opDescribeInstanceStatus = "DescribeInstanceStatus" -// DescribeInstanceStatusRequest generates a request for the DescribeInstanceStatus operation. +// DescribeInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstanceStatusRequest method. +// req, resp := client.DescribeInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) (req *request.Request, output *DescribeInstanceStatusOutput) { op := &request.Operation{ Name: opDescribeInstanceStatus, @@ -3332,19 +5389,19 @@ func (c *EC2) DescribeInstanceStatusRequest(input *DescribeInstanceStatusInput) // // Instance status includes the following components: // -// Status checks - Amazon EC2 performs status checks on running EC2 instances +// Status checks - Amazon EC2 performs status checks on running EC2 instances // to identify hardware and software issues. For more information, see Status // Checks for Your Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-system-instance-status-check.html) // and Troubleshooting Instances with Failed Status Checks (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstances.html) // in the Amazon Elastic Compute Cloud User Guide. // -// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, +// Scheduled events - Amazon EC2 can schedule events (such as reboot, stop, // or terminate) for your instances related to hardware issues, software updates, // or system maintenance. For more information, see Scheduled Events for Your // Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-instances-status-check_sched.html) // in the Amazon Elastic Compute Cloud User Guide. // -// Instance state - You can manage your instances from the moment you launch +// Instance state - You can manage your instances from the moment you launch // them through their termination. For more information, see Instance Lifecycle // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. 
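The regenerated comments above all describe the same two-step pattern: the *Request method builds a request.Request plus an output value, custom handlers may then be attached, and nothing is executed until Send is called. Below is a minimal sketch of that flow, assuming a default session whose credentials and region come from the environment, and using the DescribeInstanceStatus operation from this hunk purely as an illustration:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumes credentials and region are provided by the environment or shared config.
	svc := ec2.New(session.New())

	// Step 1: build the request object; nothing is sent yet.
	req, resp := svc.DescribeInstanceStatusRequest(&ec2.DescribeInstanceStatusInput{})

	// Step 2 (optional): inject custom logic into the request lifecycle,
	// here a handler that logs the operation name before each send attempt.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s request", r.Operation.Name)
	})

	// Step 3: execute the request; resp is populated only if Send succeeds.
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp)
}

Calling DescribeInstanceStatus directly is equivalent to building the request and sending it immediately, so the request form is only needed when a handler or pre-send inspection of the request is required.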
@@ -3354,6 +5411,23 @@ func (c *EC2) DescribeInstanceStatus(input *DescribeInstanceStatusInput) (*Descr return out, err } +// DescribeInstanceStatusPages iterates over the pages of a DescribeInstanceStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeInstanceStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstanceStatus operation. +// pageNum := 0 +// err := client.DescribeInstanceStatusPages(params, +// func(page *DescribeInstanceStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn func(p *DescribeInstanceStatusOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeInstanceStatusRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3364,7 +5438,28 @@ func (c *EC2) DescribeInstanceStatusPages(input *DescribeInstanceStatusInput, fn const opDescribeInstances = "DescribeInstances" -// DescribeInstancesRequest generates a request for the DescribeInstances operation. +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstancesRequest method. +// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { op := &request.Operation{ Name: opDescribeInstances, @@ -3398,12 +5493,35 @@ func (c *EC2) DescribeInstancesRequest(input *DescribeInstancesInput) (req *requ // // Recently terminated instances might appear in the returned results. This // interval is usually less than one hour. +// +// If you describe instances in the rare case where an Availability Zone is +// experiencing a service disruption and you specify instance IDs that are in +// the affected zone, or do not specify any instance IDs at all, the call fails. +// If you describe instances and specify only instance IDs that are in an unaffected +// zone, the call works normally. func (c *EC2) DescribeInstances(input *DescribeInstancesInput) (*DescribeInstancesOutput, error) { req, out := c.DescribeInstancesRequest(input) err := req.Send() return out, err } +// DescribeInstancesPages iterates over the pages of a DescribeInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeInstances operation. +// pageNum := 0 +// err := client.DescribeInstancesPages(params, +// func(page *DescribeInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *DescribeInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3414,7 +5532,28 @@ func (c *EC2) DescribeInstancesPages(input *DescribeInstancesInput, fn func(p *D const opDescribeInternetGateways = "DescribeInternetGateways" -// DescribeInternetGatewaysRequest generates a request for the DescribeInternetGateways operation. +// DescribeInternetGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInternetGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInternetGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInternetGatewaysRequest method. +// req, resp := client.DescribeInternetGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeInternetGatewaysRequest(input *DescribeInternetGatewaysInput) (req *request.Request, output *DescribeInternetGatewaysOutput) { op := &request.Operation{ Name: opDescribeInternetGateways, @@ -3441,7 +5580,28 @@ func (c *EC2) DescribeInternetGateways(input *DescribeInternetGatewaysInput) (*D const opDescribeKeyPairs = "DescribeKeyPairs" -// DescribeKeyPairsRequest generates a request for the DescribeKeyPairs operation. +// DescribeKeyPairsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKeyPairs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeKeyPairs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeKeyPairsRequest method. 
+// req, resp := client.DescribeKeyPairsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeKeyPairsRequest(input *DescribeKeyPairsInput) (req *request.Request, output *DescribeKeyPairsOutput) { op := &request.Operation{ Name: opDescribeKeyPairs, @@ -3471,7 +5631,28 @@ func (c *EC2) DescribeKeyPairs(input *DescribeKeyPairsInput) (*DescribeKeyPairsO const opDescribeMovingAddresses = "DescribeMovingAddresses" -// DescribeMovingAddressesRequest generates a request for the DescribeMovingAddresses operation. +// DescribeMovingAddressesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMovingAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMovingAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMovingAddressesRequest method. +// req, resp := client.DescribeMovingAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeMovingAddressesRequest(input *DescribeMovingAddressesInput) (req *request.Request, output *DescribeMovingAddressesOutput) { op := &request.Operation{ Name: opDescribeMovingAddresses, @@ -3500,7 +5681,28 @@ func (c *EC2) DescribeMovingAddresses(input *DescribeMovingAddressesInput) (*Des const opDescribeNatGateways = "DescribeNatGateways" -// DescribeNatGatewaysRequest generates a request for the DescribeNatGateways operation. +// DescribeNatGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNatGateways operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNatGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNatGatewaysRequest method. +// req, resp := client.DescribeNatGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeNatGatewaysRequest(input *DescribeNatGatewaysInput) (req *request.Request, output *DescribeNatGatewaysOutput) { op := &request.Operation{ Name: opDescribeNatGateways, @@ -3527,7 +5729,28 @@ func (c *EC2) DescribeNatGateways(input *DescribeNatGatewaysInput) (*DescribeNat const opDescribeNetworkAcls = "DescribeNetworkAcls" -// DescribeNetworkAclsRequest generates a request for the DescribeNetworkAcls operation. 
+// DescribeNetworkAclsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkAcls operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkAcls method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkAclsRequest method. +// req, resp := client.DescribeNetworkAclsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeNetworkAclsRequest(input *DescribeNetworkAclsInput) (req *request.Request, output *DescribeNetworkAclsOutput) { op := &request.Operation{ Name: opDescribeNetworkAcls, @@ -3557,7 +5780,28 @@ func (c *EC2) DescribeNetworkAcls(input *DescribeNetworkAclsInput) (*DescribeNet const opDescribeNetworkInterfaceAttribute = "DescribeNetworkInterfaceAttribute" -// DescribeNetworkInterfaceAttributeRequest generates a request for the DescribeNetworkInterfaceAttribute operation. +// DescribeNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkInterfaceAttributeRequest method. +// req, resp := client.DescribeNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeNetworkInterfaceAttributeRequest(input *DescribeNetworkInterfaceAttributeInput) (req *request.Request, output *DescribeNetworkInterfaceAttributeOutput) { op := &request.Operation{ Name: opDescribeNetworkInterfaceAttribute, @@ -3585,7 +5829,28 @@ func (c *EC2) DescribeNetworkInterfaceAttribute(input *DescribeNetworkInterfaceA const opDescribeNetworkInterfaces = "DescribeNetworkInterfaces" -// DescribeNetworkInterfacesRequest generates a request for the DescribeNetworkInterfaces operation. +// DescribeNetworkInterfacesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeNetworkInterfaces operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeNetworkInterfaces method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeNetworkInterfacesRequest method. +// req, resp := client.DescribeNetworkInterfacesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeNetworkInterfacesRequest(input *DescribeNetworkInterfacesInput) (req *request.Request, output *DescribeNetworkInterfacesOutput) { op := &request.Operation{ Name: opDescribeNetworkInterfaces, @@ -3612,7 +5877,28 @@ func (c *EC2) DescribeNetworkInterfaces(input *DescribeNetworkInterfacesInput) ( const opDescribePlacementGroups = "DescribePlacementGroups" -// DescribePlacementGroupsRequest generates a request for the DescribePlacementGroups operation. +// DescribePlacementGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePlacementGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePlacementGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePlacementGroupsRequest method. +// req, resp := client.DescribePlacementGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribePlacementGroupsRequest(input *DescribePlacementGroupsInput) (req *request.Request, output *DescribePlacementGroupsOutput) { op := &request.Operation{ Name: opDescribePlacementGroups, @@ -3641,7 +5927,28 @@ func (c *EC2) DescribePlacementGroups(input *DescribePlacementGroupsInput) (*Des const opDescribePrefixLists = "DescribePrefixLists" -// DescribePrefixListsRequest generates a request for the DescribePrefixLists operation. +// DescribePrefixListsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePrefixLists operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePrefixLists method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePrefixListsRequest method. 
+// req, resp := client.DescribePrefixListsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribePrefixListsRequest(input *DescribePrefixListsInput) (req *request.Request, output *DescribePrefixListsOutput) { op := &request.Operation{ Name: opDescribePrefixLists, @@ -3672,7 +5979,28 @@ func (c *EC2) DescribePrefixLists(input *DescribePrefixListsInput) (*DescribePre const opDescribeRegions = "DescribeRegions" -// DescribeRegionsRequest generates a request for the DescribeRegions operation. +// DescribeRegionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRegions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRegions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRegionsRequest method. +// req, resp := client.DescribeRegionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeRegionsRequest(input *DescribeRegionsInput) (req *request.Request, output *DescribeRegionsOutput) { op := &request.Operation{ Name: opDescribeRegions, @@ -3702,7 +6030,28 @@ func (c *EC2) DescribeRegions(input *DescribeRegionsInput) (*DescribeRegionsOutp const opDescribeReservedInstances = "DescribeReservedInstances" -// DescribeReservedInstancesRequest generates a request for the DescribeReservedInstances operation. +// DescribeReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesRequest method. +// req, resp := client.DescribeReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesInput) (req *request.Request, output *DescribeReservedInstancesOutput) { op := &request.Operation{ Name: opDescribeReservedInstances, @@ -3732,7 +6081,28 @@ func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) ( const opDescribeReservedInstancesListings = "DescribeReservedInstancesListings" -// DescribeReservedInstancesListingsRequest generates a request for the DescribeReservedInstancesListings operation. 
+// DescribeReservedInstancesListingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesListings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesListings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesListingsRequest method. +// req, resp := client.DescribeReservedInstancesListingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedInstancesListingsInput) (req *request.Request, output *DescribeReservedInstancesListingsOutput) { op := &request.Operation{ Name: opDescribeReservedInstancesListings, @@ -3780,7 +6150,28 @@ func (c *EC2) DescribeReservedInstancesListings(input *DescribeReservedInstances const opDescribeReservedInstancesModifications = "DescribeReservedInstancesModifications" -// DescribeReservedInstancesModificationsRequest generates a request for the DescribeReservedInstancesModifications operation. +// DescribeReservedInstancesModificationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesModifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesModifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesModificationsRequest method. +// req, resp := client.DescribeReservedInstancesModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReservedInstancesModificationsInput) (req *request.Request, output *DescribeReservedInstancesModificationsOutput) { op := &request.Operation{ Name: opDescribeReservedInstancesModifications, @@ -3817,6 +6208,23 @@ func (c *EC2) DescribeReservedInstancesModifications(input *DescribeReservedInst return out, err } +// DescribeReservedInstancesModificationsPages iterates over the pages of a DescribeReservedInstancesModifications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesModifications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesModifications operation. +// pageNum := 0 +// err := client.DescribeReservedInstancesModificationsPages(params, +// func(page *DescribeReservedInstancesModificationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReservedInstancesModificationsInput, fn func(p *DescribeReservedInstancesModificationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedInstancesModificationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3827,7 +6235,28 @@ func (c *EC2) DescribeReservedInstancesModificationsPages(input *DescribeReserve const opDescribeReservedInstancesOfferings = "DescribeReservedInstancesOfferings" -// DescribeReservedInstancesOfferingsRequest generates a request for the DescribeReservedInstancesOfferings operation. +// DescribeReservedInstancesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedInstancesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedInstancesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedInstancesOfferingsRequest method. +// req, resp := client.DescribeReservedInstancesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedInstancesOfferingsInput) (req *request.Request, output *DescribeReservedInstancesOfferingsOutput) { op := &request.Operation{ Name: opDescribeReservedInstancesOfferings, @@ -3869,6 +6298,23 @@ func (c *EC2) DescribeReservedInstancesOfferings(input *DescribeReservedInstance return out, err } +// DescribeReservedInstancesOfferingsPages iterates over the pages of a DescribeReservedInstancesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedInstancesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedInstancesOfferings operation. 
+// pageNum := 0 +// err := client.DescribeReservedInstancesOfferingsPages(params, +// func(page *DescribeReservedInstancesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedInstancesOfferingsInput, fn func(p *DescribeReservedInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedInstancesOfferingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3879,7 +6325,28 @@ func (c *EC2) DescribeReservedInstancesOfferingsPages(input *DescribeReservedIns const opDescribeRouteTables = "DescribeRouteTables" -// DescribeRouteTablesRequest generates a request for the DescribeRouteTables operation. +// DescribeRouteTablesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRouteTables operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRouteTables method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRouteTablesRequest method. +// req, resp := client.DescribeRouteTablesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeRouteTablesRequest(input *DescribeRouteTablesInput) (req *request.Request, output *DescribeRouteTablesOutput) { op := &request.Operation{ Name: opDescribeRouteTables, @@ -3914,7 +6381,28 @@ func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRou const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" -// DescribeScheduledInstanceAvailabilityRequest generates a request for the DescribeScheduledInstanceAvailability operation. +// DescribeScheduledInstanceAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstanceAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledInstanceAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScheduledInstanceAvailabilityRequest method. 
+// req, resp := client.DescribeScheduledInstanceAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { op := &request.Operation{ Name: opDescribeScheduledInstanceAvailability, @@ -3949,7 +6437,28 @@ func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInst const opDescribeScheduledInstances = "DescribeScheduledInstances" -// DescribeScheduledInstancesRequest generates a request for the DescribeScheduledInstances operation. +// DescribeScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeScheduledInstancesRequest method. +// req, resp := client.DescribeScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { op := &request.Operation{ Name: opDescribeScheduledInstances, @@ -3974,9 +6483,79 @@ func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) return out, err } +const opDescribeSecurityGroupReferences = "DescribeSecurityGroupReferences" + +// DescribeSecurityGroupReferencesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroupReferences operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSecurityGroupReferences method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSecurityGroupReferencesRequest method. 
+// req, resp := client.DescribeSecurityGroupReferencesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeSecurityGroupReferencesRequest(input *DescribeSecurityGroupReferencesInput) (req *request.Request, output *DescribeSecurityGroupReferencesOutput) { + op := &request.Operation{ + Name: opDescribeSecurityGroupReferences, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeSecurityGroupReferencesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeSecurityGroupReferencesOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Describes the VPCs on the other side of a VPC peering connection +// that are referencing the security groups you've specified in this request. +func (c *EC2) DescribeSecurityGroupReferences(input *DescribeSecurityGroupReferencesInput) (*DescribeSecurityGroupReferencesOutput, error) { + req, out := c.DescribeSecurityGroupReferencesRequest(input) + err := req.Send() + return out, err +} + const opDescribeSecurityGroups = "DescribeSecurityGroups" -// DescribeSecurityGroupsRequest generates a request for the DescribeSecurityGroups operation. +// DescribeSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSecurityGroupsRequest method. +// req, resp := client.DescribeSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSecurityGroupsRequest(input *DescribeSecurityGroupsInput) (req *request.Request, output *DescribeSecurityGroupsOutput) { op := &request.Operation{ Name: opDescribeSecurityGroups, @@ -4010,7 +6589,28 @@ func (c *EC2) DescribeSecurityGroups(input *DescribeSecurityGroupsInput) (*Descr const opDescribeSnapshotAttribute = "DescribeSnapshotAttribute" -// DescribeSnapshotAttributeRequest generates a request for the DescribeSnapshotAttribute operation. +// DescribeSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotAttributeRequest method. 
+// req, resp := client.DescribeSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeInput) (req *request.Request, output *DescribeSnapshotAttributeOutput) { op := &request.Operation{ Name: opDescribeSnapshotAttribute, @@ -4031,8 +6631,8 @@ func (c *EC2) DescribeSnapshotAttributeRequest(input *DescribeSnapshotAttributeI // Describes the specified attribute of the specified snapshot. You can specify // only one attribute at a time. // -// For more information about EBS snapshots, see Amazon EBS Snapshots in the -// Amazon Elastic Compute Cloud User Guide. +// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) +// in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) (*DescribeSnapshotAttributeOutput, error) { req, out := c.DescribeSnapshotAttributeRequest(input) err := req.Send() @@ -4041,7 +6641,28 @@ func (c *EC2) DescribeSnapshotAttribute(input *DescribeSnapshotAttributeInput) ( const opDescribeSnapshots = "DescribeSnapshots" -// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { op := &request.Operation{ Name: opDescribeSnapshots, @@ -4072,14 +6693,14 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // // The create volume permissions fall into the following categories: // -// public: The owner of the snapshot granted create volume permissions for +// public: The owner of the snapshot granted create volume permissions for // the snapshot to the all group. All AWS accounts have create volume permissions // for these snapshots. // -// explicit: The owner of the snapshot granted create volume permissions +// explicit: The owner of the snapshot granted create volume permissions // to a specific AWS account. // -// implicit: An AWS account has implicit create volume permissions for all +// implicit: An AWS account has implicit create volume permissions for all // snapshots it owns. 
// // The list of snapshots returned can be modified by specifying snapshot @@ -4092,10 +6713,10 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // If you specify a snapshot ID for which you do not have access, it is not // included in the returned results. // -// If you specify one or more snapshot owners, only snapshots from the specified -// owners and for which you have access are returned. The results can include -// the AWS account IDs of the specified owners, amazon for snapshots owned by -// Amazon, or self for snapshots that you own. +// If you specify one or more snapshot owners using the OwnerIds option, only +// snapshots from the specified owners and for which you have access are returned. +// The results can include the AWS account IDs of the specified owners, amazon +// for snapshots owned by Amazon, or self for snapshots that you own. // // If you specify a list of restorable users, only snapshots with create snapshot // permissions for those users are returned. You can specify AWS account IDs @@ -4109,14 +6730,31 @@ func (c *EC2) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *requ // a NextToken value that can be passed to a subsequent DescribeSnapshots request // to retrieve the remaining results. // -// For more information about EBS snapshots, see Amazon EBS Snapshots in the -// Amazon Elastic Compute Cloud User Guide. +// For more information about EBS snapshots, see Amazon EBS Snapshots (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSSnapshots.html) +// in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) DescribeSnapshots(input *DescribeSnapshotsInput) (*DescribeSnapshotsOutput, error) { req, out := c.DescribeSnapshotsRequest(input) err := req.Send() return out, err } +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeSnapshotsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -4127,7 +6765,28 @@ func (c *EC2) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *D const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription" -// DescribeSpotDatafeedSubscriptionRequest generates a request for the DescribeSpotDatafeedSubscription operation. +// DescribeSpotDatafeedSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotDatafeedSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotDatafeedSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotDatafeedSubscriptionRequest method. +// req, resp := client.DescribeSpotDatafeedSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) (req *request.Request, output *DescribeSpotDatafeedSubscriptionOutput) { op := &request.Operation{ Name: opDescribeSpotDatafeedSubscription, @@ -4156,7 +6815,28 @@ func (c *EC2) DescribeSpotDatafeedSubscription(input *DescribeSpotDatafeedSubscr const opDescribeSpotFleetInstances = "DescribeSpotFleetInstances" -// DescribeSpotFleetInstancesRequest generates a request for the DescribeSpotFleetInstances operation. +// DescribeSpotFleetInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetInstancesRequest method. +// req, resp := client.DescribeSpotFleetInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotFleetInstancesRequest(input *DescribeSpotFleetInstancesInput) (req *request.Request, output *DescribeSpotFleetInstancesOutput) { op := &request.Operation{ Name: opDescribeSpotFleetInstances, @@ -4183,7 +6863,28 @@ func (c *EC2) DescribeSpotFleetInstances(input *DescribeSpotFleetInstancesInput) const opDescribeSpotFleetRequestHistory = "DescribeSpotFleetRequestHistory" -// DescribeSpotFleetRequestHistoryRequest generates a request for the DescribeSpotFleetRequestHistory operation. +// DescribeSpotFleetRequestHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequestHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetRequestHistory method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetRequestHistoryRequest method. +// req, resp := client.DescribeSpotFleetRequestHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotFleetRequestHistoryRequest(input *DescribeSpotFleetRequestHistoryInput) (req *request.Request, output *DescribeSpotFleetRequestHistoryOutput) { op := &request.Operation{ Name: opDescribeSpotFleetRequestHistory, @@ -4215,12 +6916,39 @@ func (c *EC2) DescribeSpotFleetRequestHistory(input *DescribeSpotFleetRequestHis const opDescribeSpotFleetRequests = "DescribeSpotFleetRequests" -// DescribeSpotFleetRequestsRequest generates a request for the DescribeSpotFleetRequests operation. +// DescribeSpotFleetRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotFleetRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotFleetRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotFleetRequestsRequest method. +// req, resp := client.DescribeSpotFleetRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotFleetRequestsRequest(input *DescribeSpotFleetRequestsInput) (req *request.Request, output *DescribeSpotFleetRequestsOutput) { op := &request.Operation{ Name: opDescribeSpotFleetRequests, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, } if input == nil { @@ -4240,9 +6968,55 @@ func (c *EC2) DescribeSpotFleetRequests(input *DescribeSpotFleetRequestsInput) ( return out, err } +// DescribeSpotFleetRequestsPages iterates over the pages of a DescribeSpotFleetRequests operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotFleetRequests method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotFleetRequests operation. 
+// pageNum := 0 +// err := client.DescribeSpotFleetRequestsPages(params, +// func(page *DescribeSpotFleetRequestsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *EC2) DescribeSpotFleetRequestsPages(input *DescribeSpotFleetRequestsInput, fn func(p *DescribeSpotFleetRequestsOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.DescribeSpotFleetRequestsRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*DescribeSpotFleetRequestsOutput), lastPage) + }) +} + const opDescribeSpotInstanceRequests = "DescribeSpotInstanceRequests" -// DescribeSpotInstanceRequestsRequest generates a request for the DescribeSpotInstanceRequests operation. +// DescribeSpotInstanceRequestsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotInstanceRequests operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotInstanceRequests method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotInstanceRequestsRequest method. +// req, resp := client.DescribeSpotInstanceRequestsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotInstanceRequestsRequest(input *DescribeSpotInstanceRequestsInput) (req *request.Request, output *DescribeSpotInstanceRequestsOutput) { op := &request.Operation{ Name: opDescribeSpotInstanceRequests, @@ -4280,7 +7054,28 @@ func (c *EC2) DescribeSpotInstanceRequests(input *DescribeSpotInstanceRequestsIn const opDescribeSpotPriceHistory = "DescribeSpotPriceHistory" -// DescribeSpotPriceHistoryRequest generates a request for the DescribeSpotPriceHistory operation. +// DescribeSpotPriceHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSpotPriceHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSpotPriceHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSpotPriceHistoryRequest method. 
+// req, resp := client.DescribeSpotPriceHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSpotPriceHistoryRequest(input *DescribeSpotPriceHistoryInput) (req *request.Request, output *DescribeSpotPriceHistoryOutput) { op := &request.Operation{ Name: opDescribeSpotPriceHistory, @@ -4319,6 +7114,23 @@ func (c *EC2) DescribeSpotPriceHistory(input *DescribeSpotPriceHistoryInput) (*D return out, err } +// DescribeSpotPriceHistoryPages iterates over the pages of a DescribeSpotPriceHistory operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSpotPriceHistory method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSpotPriceHistory operation. +// pageNum := 0 +// err := client.DescribeSpotPriceHistoryPages(params, +// func(page *DescribeSpotPriceHistoryOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput, fn func(p *DescribeSpotPriceHistoryOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeSpotPriceHistoryRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -4327,9 +7139,81 @@ func (c *EC2) DescribeSpotPriceHistoryPages(input *DescribeSpotPriceHistoryInput }) } +const opDescribeStaleSecurityGroups = "DescribeStaleSecurityGroups" + +// DescribeStaleSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStaleSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStaleSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStaleSecurityGroupsRequest method. +// req, resp := client.DescribeStaleSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) DescribeStaleSecurityGroupsRequest(input *DescribeStaleSecurityGroupsInput) (req *request.Request, output *DescribeStaleSecurityGroupsOutput) { + op := &request.Operation{ + Name: opDescribeStaleSecurityGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeStaleSecurityGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeStaleSecurityGroupsOutput{} + req.Data = output + return +} + +// [EC2-VPC only] Describes the stale security group rules for security groups +// in a specified VPC. Rules are stale when they reference a deleted security +// group in a peer VPC, or a security group in a peer VPC for which the VPC +// peering connection has been deleted. 
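A minimal sketch of invoking the new DescribeStaleSecurityGroups operation for a single VPC. The client variable name (svc) and the VpcId field on DescribeStaleSecurityGroupsInput are assumptions here; the response is printed as-is rather than walking specific fields.

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // showStaleRules lists rules that reference security groups in peer
    // VPCs that were deleted or whose peering connection no longer exists.
    func showStaleRules(svc *ec2.EC2, vpcID string) error {
        resp, err := svc.DescribeStaleSecurityGroups(&ec2.DescribeStaleSecurityGroupsInput{
            VpcId: aws.String(vpcID),
        })
        if err != nil {
            return err
        }
        fmt.Println(resp)
        return nil
    }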
+func (c *EC2) DescribeStaleSecurityGroups(input *DescribeStaleSecurityGroupsInput) (*DescribeStaleSecurityGroupsOutput, error) { + req, out := c.DescribeStaleSecurityGroupsRequest(input) + err := req.Send() + return out, err +} + const opDescribeSubnets = "DescribeSubnets" -// DescribeSubnetsRequest generates a request for the DescribeSubnets operation. +// DescribeSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSubnets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSubnetsRequest method. +// req, resp := client.DescribeSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeSubnetsRequest(input *DescribeSubnetsInput) (req *request.Request, output *DescribeSubnetsOutput) { op := &request.Operation{ Name: opDescribeSubnets, @@ -4359,7 +7243,28 @@ func (c *EC2) DescribeSubnets(input *DescribeSubnetsInput) (*DescribeSubnetsOutp const opDescribeTags = "DescribeTags" -// DescribeTagsRequest generates a request for the DescribeTags operation. +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { op := &request.Operation{ Name: opDescribeTags, @@ -4393,6 +7298,23 @@ func (c *EC2) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error return out, err } +// DescribeTagsPages iterates over the pages of a DescribeTags operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeTags method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeTags operation. 
+// pageNum := 0 +// err := client.DescribeTagsPages(params, +// func(page *DescribeTagsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTagsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeTagsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -4403,7 +7325,28 @@ func (c *EC2) DescribeTagsPages(input *DescribeTagsInput, fn func(p *DescribeTag const opDescribeVolumeAttribute = "DescribeVolumeAttribute" -// DescribeVolumeAttributeRequest generates a request for the DescribeVolumeAttribute operation. +// DescribeVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumeAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumeAttributeRequest method. +// req, resp := client.DescribeVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput) (req *request.Request, output *DescribeVolumeAttributeOutput) { op := &request.Operation{ Name: opDescribeVolumeAttribute, @@ -4424,8 +7367,8 @@ func (c *EC2) DescribeVolumeAttributeRequest(input *DescribeVolumeAttributeInput // Describes the specified attribute of the specified volume. You can specify // only one attribute at a time. // -// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon -// Elastic Compute Cloud User Guide. +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*DescribeVolumeAttributeOutput, error) { req, out := c.DescribeVolumeAttributeRequest(input) err := req.Send() @@ -4434,7 +7377,28 @@ func (c *EC2) DescribeVolumeAttribute(input *DescribeVolumeAttributeInput) (*Des const opDescribeVolumeStatus = "DescribeVolumeStatus" -// DescribeVolumeStatusRequest generates a request for the DescribeVolumeStatus operation. +// DescribeVolumeStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumeStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumeStatus method directly +// instead. 
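The request-object form described in these comments is what makes custom handlers possible. A minimal sketch, assuming an *EC2 client named svc: it builds a DescribeVolumeStatus request, pushes a handler that logs the HTTP status code after each send, and only then executes the call with Send, at which point the output value is populated.

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/service/ec2"
    )

    // describeVolumeStatusLogged shows injecting custom logic into the
    // request lifecycle before calling Send.
    func describeVolumeStatusLogged(svc *ec2.EC2) (*ec2.DescribeVolumeStatusOutput, error) {
        req, resp := svc.DescribeVolumeStatusRequest(&ec2.DescribeVolumeStatusInput{})

        // Log the HTTP status once the Send handlers have run.
        req.Handlers.Send.PushBack(func(r *request.Request) {
            if r.HTTPResponse != nil {
                log.Printf("DescribeVolumeStatus returned HTTP %d", r.HTTPResponse.StatusCode)
            }
        })

        // The request is not executed until Send is called; resp is filled
        // only after Send returns without error.
        err := req.Send()
        return resp, err
    }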
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumeStatusRequest method. +// req, resp := client.DescribeVolumeStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req *request.Request, output *DescribeVolumeStatusOutput) { op := &request.Operation{ Name: opDescribeVolumeStatus, @@ -4470,21 +7434,21 @@ func (c *EC2) DescribeVolumeStatusRequest(input *DescribeVolumeStatusInput) (req // The DescribeVolumeStatus operation provides the following information about // the specified volumes: // -// Status: Reflects the current status of the volume. The possible values are -// ok, impaired , warning, or insufficient-data. If all checks pass, the overall -// status of the volume is ok. If the check fails, the overall status is impaired. -// If the status is insufficient-data, then the checks may still be taking place -// on your volume at the time. We recommend that you retry the request. For -// more information on volume status, see Monitoring the Status of Your Volumes -// (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). +// Status: Reflects the current status of the volume. The possible values +// are ok, impaired , warning, or insufficient-data. If all checks pass, the +// overall status of the volume is ok. If the check fails, the overall status +// is impaired. If the status is insufficient-data, then the checks may still +// be taking place on your volume at the time. We recommend that you retry the +// request. For more information on volume status, see Monitoring the Status +// of Your Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/monitoring-volume-status.html). // -// Events: Reflect the cause of a volume status and may require you to take +// Events: Reflect the cause of a volume status and may require you to take // action. For example, if your volume returns an impaired status, then the // volume event might be potential-data-inconsistency. This means that your // volume has been affected by an issue with the underlying host, has all I/O // operations disabled, and may have inconsistent data. // -// Actions: Reflect the actions you may have to take in response to an event. +// Actions: Reflect the actions you may have to take in response to an event. // For example, if the status of the volume is impaired and the volume event // shows potential-data-inconsistency, then the action shows enable-volume-io. // This means that you may want to enable the I/O operations for the volume @@ -4499,6 +7463,23 @@ func (c *EC2) DescribeVolumeStatus(input *DescribeVolumeStatusInput) (*DescribeV return out, err } +// DescribeVolumeStatusPages iterates over the pages of a DescribeVolumeStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVolumeStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumeStatus operation. 
+// pageNum := 0 +// err := client.DescribeVolumeStatusPages(params, +// func(page *DescribeVolumeStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn func(p *DescribeVolumeStatusOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeVolumeStatusRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -4509,7 +7490,28 @@ func (c *EC2) DescribeVolumeStatusPages(input *DescribeVolumeStatusInput, fn fun const opDescribeVolumes = "DescribeVolumes" -// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +// DescribeVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumesRequest method. +// req, resp := client.DescribeVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { op := &request.Operation{ Name: opDescribeVolumes, @@ -4542,14 +7544,31 @@ func (c *EC2) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request. // a NextToken value that can be passed to a subsequent DescribeVolumes request // to retrieve the remaining results. // -// For more information about EBS volumes, see Amazon EBS Volumes in the Amazon -// Elastic Compute Cloud User Guide. +// For more information about EBS volumes, see Amazon EBS Volumes (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumes.html) +// in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolumesOutput, error) { req, out := c.DescribeVolumesRequest(input) err := req.Send() return out, err } +// DescribeVolumesPages iterates over the pages of a DescribeVolumes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeVolumes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeVolumes operation. 
+// pageNum := 0 +// err := client.DescribeVolumesPages(params, +// func(page *DescribeVolumesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *DescribeVolumesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeVolumesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -4560,7 +7579,28 @@ func (c *EC2) DescribeVolumesPages(input *DescribeVolumesInput, fn func(p *Descr const opDescribeVpcAttribute = "DescribeVpcAttribute" -// DescribeVpcAttributeRequest generates a request for the DescribeVpcAttribute operation. +// DescribeVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcAttributeRequest method. +// req, resp := client.DescribeVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcAttributeRequest(input *DescribeVpcAttributeInput) (req *request.Request, output *DescribeVpcAttributeOutput) { op := &request.Operation{ Name: opDescribeVpcAttribute, @@ -4588,7 +7628,28 @@ func (c *EC2) DescribeVpcAttribute(input *DescribeVpcAttributeInput) (*DescribeV const opDescribeVpcClassicLink = "DescribeVpcClassicLink" -// DescribeVpcClassicLinkRequest generates a request for the DescribeVpcClassicLink operation. +// DescribeVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcClassicLinkRequest method. 
+// req, resp := client.DescribeVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcClassicLinkRequest(input *DescribeVpcClassicLinkInput) (req *request.Request, output *DescribeVpcClassicLinkOutput) { op := &request.Operation{ Name: opDescribeVpcClassicLink, @@ -4615,7 +7676,28 @@ func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*Descr const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" -// DescribeVpcClassicLinkDnsSupportRequest generates a request for the DescribeVpcClassicLinkDnsSupport operation. +// DescribeVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcClassicLinkDnsSupport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcClassicLinkDnsSupportRequest method. +// req, resp := client.DescribeVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { op := &request.Operation{ Name: opDescribeVpcClassicLinkDnsSupport, @@ -4648,7 +7730,28 @@ func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsS const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" -// DescribeVpcEndpointServicesRequest generates a request for the DescribeVpcEndpointServices operation. +// DescribeVpcEndpointServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpointServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcEndpointServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcEndpointServicesRequest method. 
+// req, resp := client.DescribeVpcEndpointServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcEndpointServicesRequest(input *DescribeVpcEndpointServicesInput) (req *request.Request, output *DescribeVpcEndpointServicesOutput) { op := &request.Operation{ Name: opDescribeVpcEndpointServices, @@ -4676,7 +7779,28 @@ func (c *EC2) DescribeVpcEndpointServices(input *DescribeVpcEndpointServicesInpu const opDescribeVpcEndpoints = "DescribeVpcEndpoints" -// DescribeVpcEndpointsRequest generates a request for the DescribeVpcEndpoints operation. +// DescribeVpcEndpointsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcEndpoints operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcEndpoints method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcEndpointsRequest method. +// req, resp := client.DescribeVpcEndpointsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcEndpointsRequest(input *DescribeVpcEndpointsInput) (req *request.Request, output *DescribeVpcEndpointsOutput) { op := &request.Operation{ Name: opDescribeVpcEndpoints, @@ -4703,7 +7827,28 @@ func (c *EC2) DescribeVpcEndpoints(input *DescribeVpcEndpointsInput) (*DescribeV const opDescribeVpcPeeringConnections = "DescribeVpcPeeringConnections" -// DescribeVpcPeeringConnectionsRequest generates a request for the DescribeVpcPeeringConnections operation. +// DescribeVpcPeeringConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcPeeringConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcPeeringConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcPeeringConnectionsRequest method. 
+// req, resp := client.DescribeVpcPeeringConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcPeeringConnectionsRequest(input *DescribeVpcPeeringConnectionsInput) (req *request.Request, output *DescribeVpcPeeringConnectionsOutput) { op := &request.Operation{ Name: opDescribeVpcPeeringConnections, @@ -4730,7 +7875,28 @@ func (c *EC2) DescribeVpcPeeringConnections(input *DescribeVpcPeeringConnections const opDescribeVpcs = "DescribeVpcs" -// DescribeVpcsRequest generates a request for the DescribeVpcs operation. +// DescribeVpcsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpcs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpcs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpcsRequest method. +// req, resp := client.DescribeVpcsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpcsRequest(input *DescribeVpcsInput) (req *request.Request, output *DescribeVpcsOutput) { op := &request.Operation{ Name: opDescribeVpcs, @@ -4757,7 +7923,28 @@ func (c *EC2) DescribeVpcs(input *DescribeVpcsInput) (*DescribeVpcsOutput, error const opDescribeVpnConnections = "DescribeVpnConnections" -// DescribeVpnConnectionsRequest generates a request for the DescribeVpnConnections operation. +// DescribeVpnConnectionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnConnections operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpnConnections method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpnConnectionsRequest method. +// req, resp := client.DescribeVpnConnectionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpnConnectionsRequest(input *DescribeVpnConnectionsInput) (req *request.Request, output *DescribeVpnConnectionsOutput) { op := &request.Operation{ Name: opDescribeVpnConnections, @@ -4788,7 +7975,28 @@ func (c *EC2) DescribeVpnConnections(input *DescribeVpnConnectionsInput) (*Descr const opDescribeVpnGateways = "DescribeVpnGateways" -// DescribeVpnGatewaysRequest generates a request for the DescribeVpnGateways operation. +// DescribeVpnGatewaysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVpnGateways operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVpnGateways method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVpnGatewaysRequest method. +// req, resp := client.DescribeVpnGatewaysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DescribeVpnGatewaysRequest(input *DescribeVpnGatewaysInput) (req *request.Request, output *DescribeVpnGatewaysOutput) { op := &request.Operation{ Name: opDescribeVpnGateways, @@ -4819,7 +8027,28 @@ func (c *EC2) DescribeVpnGateways(input *DescribeVpnGatewaysInput) (*DescribeVpn const opDetachClassicLinkVpc = "DetachClassicLinkVpc" -// DetachClassicLinkVpcRequest generates a request for the DetachClassicLinkVpc operation. +// DetachClassicLinkVpcRequest generates a "aws/request.Request" representing the +// client's request for the DetachClassicLinkVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachClassicLinkVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachClassicLinkVpcRequest method. +// req, resp := client.DetachClassicLinkVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DetachClassicLinkVpcRequest(input *DetachClassicLinkVpcInput) (req *request.Request, output *DetachClassicLinkVpcOutput) { op := &request.Operation{ Name: opDetachClassicLinkVpc, @@ -4848,7 +8077,28 @@ func (c *EC2) DetachClassicLinkVpc(input *DetachClassicLinkVpcInput) (*DetachCla const opDetachInternetGateway = "DetachInternetGateway" -// DetachInternetGatewayRequest generates a request for the DetachInternetGateway operation. +// DetachInternetGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachInternetGateway operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachInternetGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachInternetGatewayRequest method. 
+// req, resp := client.DetachInternetGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DetachInternetGatewayRequest(input *DetachInternetGatewayInput) (req *request.Request, output *DetachInternetGatewayOutput) { op := &request.Operation{ Name: opDetachInternetGateway, @@ -4879,7 +8129,28 @@ func (c *EC2) DetachInternetGateway(input *DetachInternetGatewayInput) (*DetachI const opDetachNetworkInterface = "DetachNetworkInterface" -// DetachNetworkInterfaceRequest generates a request for the DetachNetworkInterface operation. +// DetachNetworkInterfaceRequest generates a "aws/request.Request" representing the +// client's request for the DetachNetworkInterface operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachNetworkInterface method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachNetworkInterfaceRequest method. +// req, resp := client.DetachNetworkInterfaceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DetachNetworkInterfaceRequest(input *DetachNetworkInterfaceInput) (req *request.Request, output *DetachNetworkInterfaceOutput) { op := &request.Operation{ Name: opDetachNetworkInterface, @@ -4908,7 +8179,28 @@ func (c *EC2) DetachNetworkInterface(input *DetachNetworkInterfaceInput) (*Detac const opDetachVolume = "DetachVolume" -// DetachVolumeRequest generates a request for the DetachVolume operation. +// DetachVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DetachVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachVolumeRequest method. +// req, resp := client.DetachVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DetachVolumeRequest(input *DetachVolumeInput) (req *request.Request, output *VolumeAttachment) { op := &request.Operation{ Name: opDetachVolume, @@ -4947,7 +8239,28 @@ func (c *EC2) DetachVolume(input *DetachVolumeInput) (*VolumeAttachment, error) const opDetachVpnGateway = "DetachVpnGateway" -// DetachVpnGatewayRequest generates a request for the DetachVpnGateway operation. +// DetachVpnGatewayRequest generates a "aws/request.Request" representing the +// client's request for the DetachVpnGateway operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachVpnGateway method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachVpnGatewayRequest method. +// req, resp := client.DetachVpnGatewayRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DetachVpnGatewayRequest(input *DetachVpnGatewayInput) (req *request.Request, output *DetachVpnGatewayOutput) { op := &request.Operation{ Name: opDetachVpnGateway, @@ -4983,7 +8296,28 @@ func (c *EC2) DetachVpnGateway(input *DetachVpnGatewayInput) (*DetachVpnGatewayO const opDisableVgwRoutePropagation = "DisableVgwRoutePropagation" -// DisableVgwRoutePropagationRequest generates a request for the DisableVgwRoutePropagation operation. +// DisableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the DisableVgwRoutePropagation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVgwRoutePropagation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableVgwRoutePropagationRequest method. +// req, resp := client.DisableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DisableVgwRoutePropagationRequest(input *DisableVgwRoutePropagationInput) (req *request.Request, output *DisableVgwRoutePropagationOutput) { op := &request.Operation{ Name: opDisableVgwRoutePropagation, @@ -5013,7 +8347,28 @@ func (c *EC2) DisableVgwRoutePropagation(input *DisableVgwRoutePropagationInput) const opDisableVpcClassicLink = "DisableVpcClassicLink" -// DisableVpcClassicLinkRequest generates a request for the DisableVpcClassicLink operation. +// DisableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DisableVpcClassicLinkRequest method. +// req, resp := client.DisableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DisableVpcClassicLinkRequest(input *DisableVpcClassicLinkInput) (req *request.Request, output *DisableVpcClassicLinkOutput) { op := &request.Operation{ Name: opDisableVpcClassicLink, @@ -5041,7 +8396,28 @@ func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*Disable const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" -// DisableVpcClassicLinkDnsSupportRequest generates a request for the DisableVpcClassicLinkDnsSupport operation. +// DisableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the DisableVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableVpcClassicLinkDnsSupport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableVpcClassicLinkDnsSupportRequest method. +// req, resp := client.DisableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { op := &request.Operation{ Name: opDisableVpcClassicLinkDnsSupport, @@ -5072,7 +8448,28 @@ func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSup const opDisassociateAddress = "DisassociateAddress" -// DisassociateAddressRequest generates a request for the DisassociateAddress operation. +// DisassociateAddressRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateAddressRequest method. 
+// req, resp := client.DisassociateAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DisassociateAddressRequest(input *DisassociateAddressInput) (req *request.Request, output *DisassociateAddressOutput) { op := &request.Operation{ Name: opDisassociateAddress, @@ -5109,7 +8506,28 @@ func (c *EC2) DisassociateAddress(input *DisassociateAddressInput) (*Disassociat const opDisassociateRouteTable = "DisassociateRouteTable" -// DisassociateRouteTableRequest generates a request for the DisassociateRouteTable operation. +// DisassociateRouteTableRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateRouteTable operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateRouteTable method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateRouteTableRequest method. +// req, resp := client.DisassociateRouteTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) DisassociateRouteTableRequest(input *DisassociateRouteTableInput) (req *request.Request, output *DisassociateRouteTableOutput) { op := &request.Operation{ Name: opDisassociateRouteTable, @@ -5143,7 +8561,28 @@ func (c *EC2) DisassociateRouteTable(input *DisassociateRouteTableInput) (*Disas const opEnableVgwRoutePropagation = "EnableVgwRoutePropagation" -// EnableVgwRoutePropagationRequest generates a request for the EnableVgwRoutePropagation operation. +// EnableVgwRoutePropagationRequest generates a "aws/request.Request" representing the +// client's request for the EnableVgwRoutePropagation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVgwRoutePropagation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVgwRoutePropagationRequest method. +// req, resp := client.EnableVgwRoutePropagationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) EnableVgwRoutePropagationRequest(input *EnableVgwRoutePropagationInput) (req *request.Request, output *EnableVgwRoutePropagationOutput) { op := &request.Operation{ Name: opEnableVgwRoutePropagation, @@ -5173,7 +8612,28 @@ func (c *EC2) EnableVgwRoutePropagation(input *EnableVgwRoutePropagationInput) ( const opEnableVolumeIO = "EnableVolumeIO" -// EnableVolumeIORequest generates a request for the EnableVolumeIO operation. 
+// EnableVolumeIORequest generates a "aws/request.Request" representing the +// client's request for the EnableVolumeIO operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVolumeIO method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVolumeIORequest method. +// req, resp := client.EnableVolumeIORequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) EnableVolumeIORequest(input *EnableVolumeIOInput) (req *request.Request, output *EnableVolumeIOOutput) { op := &request.Operation{ Name: opEnableVolumeIO, @@ -5203,7 +8663,28 @@ func (c *EC2) EnableVolumeIO(input *EnableVolumeIOInput) (*EnableVolumeIOOutput, const opEnableVpcClassicLink = "EnableVpcClassicLink" -// EnableVpcClassicLinkRequest generates a request for the EnableVpcClassicLink operation. +// EnableVpcClassicLinkRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLink operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVpcClassicLink method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVpcClassicLinkRequest method. +// req, resp := client.EnableVpcClassicLinkRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) EnableVpcClassicLinkRequest(input *EnableVpcClassicLinkInput) (req *request.Request, output *EnableVpcClassicLinkOutput) { op := &request.Operation{ Name: opEnableVpcClassicLink, @@ -5236,7 +8717,28 @@ func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpc const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" -// EnableVpcClassicLinkDnsSupportRequest generates a request for the EnableVpcClassicLinkDnsSupport operation. +// EnableVpcClassicLinkDnsSupportRequest generates a "aws/request.Request" representing the +// client's request for the EnableVpcClassicLinkDnsSupport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableVpcClassicLinkDnsSupport method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableVpcClassicLinkDnsSupportRequest method. +// req, resp := client.EnableVpcClassicLinkDnsSupportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { op := &request.Operation{ Name: opEnableVpcClassicLinkDnsSupport, @@ -5269,7 +8771,28 @@ func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSuppo const opGetConsoleOutput = "GetConsoleOutput" -// GetConsoleOutputRequest generates a request for the GetConsoleOutput operation. +// GetConsoleOutputRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleOutput operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetConsoleOutput method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetConsoleOutputRequest method. +// req, resp := client.GetConsoleOutputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) GetConsoleOutputRequest(input *GetConsoleOutputInput) (req *request.Request, output *GetConsoleOutputOutput) { op := &request.Operation{ Name: opGetConsoleOutput, @@ -5311,9 +8834,80 @@ func (c *EC2) GetConsoleOutput(input *GetConsoleOutputInput) (*GetConsoleOutputO return out, err } +const opGetConsoleScreenshot = "GetConsoleScreenshot" + +// GetConsoleScreenshotRequest generates a "aws/request.Request" representing the +// client's request for the GetConsoleScreenshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetConsoleScreenshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetConsoleScreenshotRequest method. 
+// req, resp := client.GetConsoleScreenshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) GetConsoleScreenshotRequest(input *GetConsoleScreenshotInput) (req *request.Request, output *GetConsoleScreenshotOutput) { + op := &request.Operation{ + Name: opGetConsoleScreenshot, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetConsoleScreenshotInput{} + } + + req = c.newRequest(op, input, output) + output = &GetConsoleScreenshotOutput{} + req.Data = output + return +} + +// Retrieve a JPG-format screenshot of a running instance to help with troubleshooting. +// +// The returned content is Base64-encoded. +func (c *EC2) GetConsoleScreenshot(input *GetConsoleScreenshotInput) (*GetConsoleScreenshotOutput, error) { + req, out := c.GetConsoleScreenshotRequest(input) + err := req.Send() + return out, err +} + const opGetPasswordData = "GetPasswordData" -// GetPasswordDataRequest generates a request for the GetPasswordData operation. +// GetPasswordDataRequest generates a "aws/request.Request" representing the +// client's request for the GetPasswordData operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPasswordData method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPasswordDataRequest method. +// req, resp := client.GetPasswordDataRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) GetPasswordDataRequest(input *GetPasswordDataInput) (req *request.Request, output *GetPasswordDataOutput) { op := &request.Operation{ Name: opGetPasswordData, @@ -5353,7 +8947,28 @@ func (c *EC2) GetPasswordData(input *GetPasswordDataInput) (*GetPasswordDataOutp const opImportImage = "ImportImage" -// ImportImageRequest generates a request for the ImportImage operation. +// ImportImageRequest generates a "aws/request.Request" representing the +// client's request for the ImportImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportImageRequest method. 
+// req, resp := client.ImportImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ImportImageRequest(input *ImportImageInput) (req *request.Request, output *ImportImageOutput) { op := &request.Operation{ Name: opImportImage, @@ -5381,7 +8996,28 @@ func (c *EC2) ImportImage(input *ImportImageInput) (*ImportImageOutput, error) { const opImportInstance = "ImportInstance" -// ImportInstanceRequest generates a request for the ImportInstance operation. +// ImportInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ImportInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportInstanceRequest method. +// req, resp := client.ImportInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ImportInstanceRequest(input *ImportInstanceInput) (req *request.Request, output *ImportInstanceOutput) { op := &request.Operation{ Name: opImportInstance, @@ -5417,7 +9053,28 @@ func (c *EC2) ImportInstance(input *ImportInstanceInput) (*ImportInstanceOutput, const opImportKeyPair = "ImportKeyPair" -// ImportKeyPairRequest generates a request for the ImportKeyPair operation. +// ImportKeyPairRequest generates a "aws/request.Request" representing the +// client's request for the ImportKeyPair operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportKeyPair method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportKeyPairRequest method. +// req, resp := client.ImportKeyPairRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ImportKeyPairRequest(input *ImportKeyPairInput) (req *request.Request, output *ImportKeyPairOutput) { op := &request.Operation{ Name: opImportKeyPair, @@ -5451,7 +9108,28 @@ func (c *EC2) ImportKeyPair(input *ImportKeyPairInput) (*ImportKeyPairOutput, er const opImportSnapshot = "ImportSnapshot" -// ImportSnapshotRequest generates a request for the ImportSnapshot operation. +// ImportSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the ImportSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportSnapshotRequest method. +// req, resp := client.ImportSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ImportSnapshotRequest(input *ImportSnapshotInput) (req *request.Request, output *ImportSnapshotOutput) { op := &request.Operation{ Name: opImportSnapshot, @@ -5478,7 +9156,28 @@ func (c *EC2) ImportSnapshot(input *ImportSnapshotInput) (*ImportSnapshotOutput, const opImportVolume = "ImportVolume" -// ImportVolumeRequest generates a request for the ImportVolume operation. +// ImportVolumeRequest generates a "aws/request.Request" representing the +// client's request for the ImportVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportVolumeRequest method. +// req, resp := client.ImportVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ImportVolumeRequest(input *ImportVolumeInput) (req *request.Request, output *ImportVolumeOutput) { op := &request.Operation{ Name: opImportVolume, @@ -5513,7 +9212,28 @@ func (c *EC2) ImportVolume(input *ImportVolumeInput) (*ImportVolumeOutput, error const opModifyHosts = "ModifyHosts" -// ModifyHostsRequest generates a request for the ModifyHosts operation. +// ModifyHostsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyHostsRequest method. 
+// req, resp := client.ModifyHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyHostsRequest(input *ModifyHostsInput) (req *request.Request, output *ModifyHostsOutput) { op := &request.Operation{ Name: opModifyHosts, @@ -5546,7 +9266,28 @@ func (c *EC2) ModifyHosts(input *ModifyHostsInput) (*ModifyHostsOutput, error) { const opModifyIdFormat = "ModifyIdFormat" -// ModifyIdFormatRequest generates a request for the ModifyIdFormat operation. +// ModifyIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdFormat operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyIdFormatRequest method. +// req, resp := client.ModifyIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Request, output *ModifyIdFormatOutput) { op := &request.Operation{ Name: opModifyIdFormat, @@ -5573,25 +9314,107 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re // // This setting applies to the IAM user who makes the request; it does not // apply to the entire AWS account. By default, an IAM user defaults to the -// same settings as the root user. If you're using this action as the root user -// or as an IAM role that has permission to use this action, then these settings -// apply to the entire account, unless an IAM user explicitly overrides these -// settings for themselves. For more information, see Controlling Access to -// Longer ID Settings (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html#resource-ids-access) +// same settings as the root user. If you're using this action as the root user, +// then these settings apply to the entire account, unless an IAM user explicitly +// overrides these settings for themselves. For more information, see Resource +// IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) // in the Amazon Elastic Compute Cloud User Guide. // -// Resources created with longer IDs are visible to all IAM users, regardless -// of these settings and provided that they have permission to use the relevant -// Describe command for the resource type. +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type. func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { req, out := c.ModifyIdFormatRequest(input) err := req.Send() return out, err } +const opModifyIdentityIdFormat = "ModifyIdentityIdFormat" + +// ModifyIdentityIdFormatRequest generates a "aws/request.Request" representing the +// client's request for the ModifyIdentityIdFormat operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyIdentityIdFormat method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyIdentityIdFormatRequest method. +// req, resp := client.ModifyIdentityIdFormatRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *EC2) ModifyIdentityIdFormatRequest(input *ModifyIdentityIdFormatInput) (req *request.Request, output *ModifyIdentityIdFormatOutput) { + op := &request.Operation{ + Name: opModifyIdentityIdFormat, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyIdentityIdFormatInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(ec2query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &ModifyIdentityIdFormatOutput{} + req.Data = output + return +} + +// Modifies the ID format of a resource for the specified IAM user, IAM role, +// or root user. You can specify that resources should receive longer IDs (17-character +// IDs) when they are created. The following resource types support longer IDs: +// instance | reservation | snapshot | volume. For more information, see Resource +// IDs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/resource-ids.html) +// in the Amazon Elastic Compute Cloud User Guide. +// +// This setting applies to the principal specified in the request; it does +// not apply to the principal that makes the request. +// +// Resources created with longer IDs are visible to all IAM roles and users, +// regardless of these settings and provided that they have permission to use +// the relevant Describe command for the resource type. +func (c *EC2) ModifyIdentityIdFormat(input *ModifyIdentityIdFormatInput) (*ModifyIdentityIdFormatOutput, error) { + req, out := c.ModifyIdentityIdFormatRequest(input) + err := req.Send() + return out, err +} + const opModifyImageAttribute = "ModifyImageAttribute" -// ModifyImageAttributeRequest generates a request for the ModifyImageAttribute operation. +// ModifyImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyImageAttributeRequest method. 
+// req, resp := client.ModifyImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req *request.Request, output *ModifyImageAttributeOutput) { op := &request.Operation{ Name: opModifyImageAttribute, @@ -5614,7 +9437,7 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req // Modifies the specified attribute of the specified AMI. You can specify only // one attribute at a time. // -// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace +// AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace // product code cannot be made public. func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { req, out := c.ModifyImageAttributeRequest(input) @@ -5624,7 +9447,28 @@ func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyIma const opModifyInstanceAttribute = "ModifyInstanceAttribute" -// ModifyInstanceAttributeRequest generates a request for the ModifyInstanceAttribute operation. +// ModifyInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstanceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstanceAttributeRequest method. +// req, resp := client.ModifyInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyInstanceAttributeRequest(input *ModifyInstanceAttributeInput) (req *request.Request, output *ModifyInstanceAttributeOutput) { op := &request.Operation{ Name: opModifyInstanceAttribute, @@ -5658,7 +9502,28 @@ func (c *EC2) ModifyInstanceAttribute(input *ModifyInstanceAttributeInput) (*Mod const opModifyInstancePlacement = "ModifyInstancePlacement" -// ModifyInstancePlacementRequest generates a request for the ModifyInstancePlacement operation. +// ModifyInstancePlacementRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstancePlacement operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstancePlacement method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstancePlacementRequest method. 
+// req, resp := client.ModifyInstancePlacementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyInstancePlacementRequest(input *ModifyInstancePlacementInput) (req *request.Request, output *ModifyInstancePlacementOutput) { op := &request.Operation{ Name: opModifyInstancePlacement, @@ -5703,7 +9568,28 @@ func (c *EC2) ModifyInstancePlacement(input *ModifyInstancePlacementInput) (*Mod const opModifyNetworkInterfaceAttribute = "ModifyNetworkInterfaceAttribute" -// ModifyNetworkInterfaceAttributeRequest generates a request for the ModifyNetworkInterfaceAttribute operation. +// ModifyNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyNetworkInterfaceAttributeRequest method. +// req, resp := client.ModifyNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyNetworkInterfaceAttributeRequest(input *ModifyNetworkInterfaceAttributeInput) (req *request.Request, output *ModifyNetworkInterfaceAttributeOutput) { op := &request.Operation{ Name: opModifyNetworkInterfaceAttribute, @@ -5733,7 +9619,28 @@ func (c *EC2) ModifyNetworkInterfaceAttribute(input *ModifyNetworkInterfaceAttri const opModifyReservedInstances = "ModifyReservedInstances" -// ModifyReservedInstancesRequest generates a request for the ModifyReservedInstances operation. +// ModifyReservedInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReservedInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReservedInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyReservedInstancesRequest method. 
+// req, resp := client.ModifyReservedInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput) (req *request.Request, output *ModifyReservedInstancesOutput) { op := &request.Operation{ Name: opModifyReservedInstances, @@ -5766,7 +9673,28 @@ func (c *EC2) ModifyReservedInstances(input *ModifyReservedInstancesInput) (*Mod const opModifySnapshotAttribute = "ModifySnapshotAttribute" -// ModifySnapshotAttributeRequest generates a request for the ModifySnapshotAttribute operation. +// ModifySnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySnapshotAttributeRequest method. +// req, resp := client.ModifySnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput) (req *request.Request, output *ModifySnapshotAttributeOutput) { op := &request.Operation{ Name: opModifySnapshotAttribute, @@ -5792,11 +9720,13 @@ func (c *EC2) ModifySnapshotAttributeRequest(input *ModifySnapshotAttributeInput // both add and remove account IDs for a snapshot, you must use multiple API // calls. // -// For more information on modifying snapshot permissions, see Sharing Snapshots +// Encrypted snapshots and snapshots with AWS Marketplace product codes cannot +// be made public. Snapshots encrypted with your default CMK cannot be shared +// with other accounts. +// +// For more information on modifying snapshot permissions, see Sharing Snapshots // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-modifying-snapshot-permissions.html) // in the Amazon Elastic Compute Cloud User Guide. -// -// Snapshots with AWS Marketplace product codes cannot be made public. func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*ModifySnapshotAttributeOutput, error) { req, out := c.ModifySnapshotAttributeRequest(input) err := req.Send() @@ -5805,7 +9735,28 @@ func (c *EC2) ModifySnapshotAttribute(input *ModifySnapshotAttributeInput) (*Mod const opModifySpotFleetRequest = "ModifySpotFleetRequest" -// ModifySpotFleetRequestRequest generates a request for the ModifySpotFleetRequest operation. +// ModifySpotFleetRequestRequest generates a "aws/request.Request" representing the +// client's request for the ModifySpotFleetRequest operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySpotFleetRequest method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySpotFleetRequestRequest method. +// req, resp := client.ModifySpotFleetRequestRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifySpotFleetRequestRequest(input *ModifySpotFleetRequestInput) (req *request.Request, output *ModifySpotFleetRequestOutput) { op := &request.Operation{ Name: opModifySpotFleetRequest, @@ -5851,7 +9802,28 @@ func (c *EC2) ModifySpotFleetRequest(input *ModifySpotFleetRequestInput) (*Modif const opModifySubnetAttribute = "ModifySubnetAttribute" -// ModifySubnetAttributeRequest generates a request for the ModifySubnetAttribute operation. +// ModifySubnetAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifySubnetAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifySubnetAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySubnetAttributeRequest method. +// req, resp := client.ModifySubnetAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifySubnetAttributeRequest(input *ModifySubnetAttributeInput) (req *request.Request, output *ModifySubnetAttributeOutput) { op := &request.Operation{ Name: opModifySubnetAttribute, @@ -5880,7 +9852,28 @@ func (c *EC2) ModifySubnetAttribute(input *ModifySubnetAttributeInput) (*ModifyS const opModifyVolumeAttribute = "ModifyVolumeAttribute" -// ModifyVolumeAttributeRequest generates a request for the ModifyVolumeAttribute operation. +// ModifyVolumeAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVolumeAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVolumeAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVolumeAttributeRequest method. 
+// req, resp := client.ModifyVolumeAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyVolumeAttributeRequest(input *ModifyVolumeAttributeInput) (req *request.Request, output *ModifyVolumeAttributeOutput) { op := &request.Operation{ Name: opModifyVolumeAttribute, @@ -5918,7 +9911,28 @@ func (c *EC2) ModifyVolumeAttribute(input *ModifyVolumeAttributeInput) (*ModifyV const opModifyVpcAttribute = "ModifyVpcAttribute" -// ModifyVpcAttributeRequest generates a request for the ModifyVpcAttribute operation. +// ModifyVpcAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcAttributeRequest method. +// req, resp := client.ModifyVpcAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyVpcAttributeRequest(input *ModifyVpcAttributeInput) (req *request.Request, output *ModifyVpcAttributeOutput) { op := &request.Operation{ Name: opModifyVpcAttribute, @@ -5947,7 +9961,28 @@ func (c *EC2) ModifyVpcAttribute(input *ModifyVpcAttributeInput) (*ModifyVpcAttr const opModifyVpcEndpoint = "ModifyVpcEndpoint" -// ModifyVpcEndpointRequest generates a request for the ModifyVpcEndpoint operation. +// ModifyVpcEndpointRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcEndpointRequest method. +// req, resp := client.ModifyVpcEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyVpcEndpointRequest(input *ModifyVpcEndpointInput) (req *request.Request, output *ModifyVpcEndpointOutput) { op := &request.Operation{ Name: opModifyVpcEndpoint, @@ -5976,7 +10011,28 @@ func (c *EC2) ModifyVpcEndpoint(input *ModifyVpcEndpointInput) (*ModifyVpcEndpoi const opModifyVpcPeeringConnectionOptions = "ModifyVpcPeeringConnectionOptions" -// ModifyVpcPeeringConnectionOptionsRequest generates a request for the ModifyVpcPeeringConnectionOptions operation. 
+// ModifyVpcPeeringConnectionOptionsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyVpcPeeringConnectionOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyVpcPeeringConnectionOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyVpcPeeringConnectionOptionsRequest method. +// req, resp := client.ModifyVpcPeeringConnectionOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringConnectionOptionsInput) (req *request.Request, output *ModifyVpcPeeringConnectionOptionsOutput) { op := &request.Operation{ Name: opModifyVpcPeeringConnectionOptions, @@ -5997,14 +10053,14 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo // Modifies the VPC peering connection options on one side of a VPC peering // connection. You can do the following: // -// Enable/disable communication over the peering connection between an EC2-Classic +// Enable/disable communication over the peering connection between an EC2-Classic // instance that's linked to your VPC (using ClassicLink) and instances in the // peer VPC. // -// Enable/disable communication over the peering connection between instances +// Enable/disable communication over the peering connection between instances // in your VPC and an EC2-Classic instance that's linked to the peer VPC. // -// If the peered VPCs are in different accounts, each owner must initiate +// If the peered VPCs are in different accounts, each owner must initiate // a separate request to enable or disable communication in either direction, // depending on whether their VPC was the requester or accepter for the VPC // peering connection. If the peered VPCs are in the same account, you can modify @@ -6019,7 +10075,28 @@ func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectio const opMonitorInstances = "MonitorInstances" -// MonitorInstancesRequest generates a request for the MonitorInstances operation. +// MonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the MonitorInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MonitorInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MonitorInstancesRequest method. 
+// req, resp := client.MonitorInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) MonitorInstancesRequest(input *MonitorInstancesInput) (req *request.Request, output *MonitorInstancesOutput) { op := &request.Operation{ Name: opMonitorInstances, @@ -6048,7 +10125,28 @@ func (c *EC2) MonitorInstances(input *MonitorInstancesInput) (*MonitorInstancesO const opMoveAddressToVpc = "MoveAddressToVpc" -// MoveAddressToVpcRequest generates a request for the MoveAddressToVpc operation. +// MoveAddressToVpcRequest generates a "aws/request.Request" representing the +// client's request for the MoveAddressToVpc operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MoveAddressToVpc method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MoveAddressToVpcRequest method. +// req, resp := client.MoveAddressToVpcRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *request.Request, output *MoveAddressToVpcOutput) { op := &request.Operation{ Name: opMoveAddressToVpc, @@ -6072,9 +10170,7 @@ func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *reques // Elastic IP address is moved, it is no longer available for use in the EC2-Classic // platform, unless you move it back using the RestoreAddressToClassic request. // You cannot move an Elastic IP address that was originally allocated for use -// in the EC2-VPC platform to the EC2-Classic platform. You cannot migrate an -// Elastic IP address that's associated with a reverse DNS record. Contact AWS -// account and billing support to remove the reverse DNS record. +// in the EC2-VPC platform to the EC2-Classic platform. func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { req, out := c.MoveAddressToVpcRequest(input) err := req.Send() @@ -6083,7 +10179,28 @@ func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcO const opPurchaseReservedInstancesOffering = "PurchaseReservedInstancesOffering" -// PurchaseReservedInstancesOfferingRequest generates a request for the PurchaseReservedInstancesOffering operation. +// PurchaseReservedInstancesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedInstancesOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedInstancesOffering method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedInstancesOfferingRequest method. +// req, resp := client.PurchaseReservedInstancesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedInstancesOfferingInput) (req *request.Request, output *PurchaseReservedInstancesOfferingOutput) { op := &request.Operation{ Name: opPurchaseReservedInstancesOffering, @@ -6121,7 +10238,28 @@ func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstances const opPurchaseScheduledInstances = "PurchaseScheduledInstances" -// PurchaseScheduledInstancesRequest generates a request for the PurchaseScheduledInstances operation. +// PurchaseScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseScheduledInstancesRequest method. +// req, resp := client.PurchaseScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { op := &request.Operation{ Name: opPurchaseScheduledInstances, @@ -6157,7 +10295,28 @@ func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) const opRebootInstances = "RebootInstances" -// RebootInstancesRequest generates a request for the RebootInstances operation. +// RebootInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootInstancesRequest method. 
+// req, resp := client.RebootInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RebootInstancesRequest(input *RebootInstancesInput) (req *request.Request, output *RebootInstancesOutput) { op := &request.Operation{ Name: opRebootInstances, @@ -6196,7 +10355,28 @@ func (c *EC2) RebootInstances(input *RebootInstancesInput) (*RebootInstancesOutp const opRegisterImage = "RegisterImage" -// RegisterImageRequest generates a request for the RegisterImage operation. +// RegisterImageRequest generates a "aws/request.Request" representing the +// client's request for the RegisterImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterImageRequest method. +// req, resp := client.RegisterImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Request, output *RegisterImageOutput) { op := &request.Operation{ Name: opRegisterImage, @@ -6219,10 +10399,10 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // about creating AMIs, see Creating Your Own AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/creating-an-ami.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For Amazon EBS-backed instances, CreateImage creates and registers the AMI -// in a single request, so you don't have to register the AMI yourself. +// For Amazon EBS-backed instances, CreateImage creates and registers the +// AMI in a single request, so you don't have to register the AMI yourself. // -// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI +// You can also use RegisterImage to create an Amazon EBS-backed Linux AMI // from a snapshot of a root device volume. For more information, see Launching // an Instance from a Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_LaunchingInstanceFromSnapshot.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -6245,7 +10425,7 @@ func (c *EC2) RegisterImageRequest(input *RegisterImageInput) (req *request.Requ // If you make changes to an image, deregister the previous image and register // the new image. // -// You can't register an image where a secondary (non-root) snapshot has AWS +// You can't register an image where a secondary (non-root) snapshot has AWS // Marketplace product codes. func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, error) { req, out := c.RegisterImageRequest(input) @@ -6255,7 +10435,28 @@ func (c *EC2) RegisterImage(input *RegisterImageInput) (*RegisterImageOutput, er const opRejectVpcPeeringConnection = "RejectVpcPeeringConnection" -// RejectVpcPeeringConnectionRequest generates a request for the RejectVpcPeeringConnection operation. 
+// RejectVpcPeeringConnectionRequest generates a "aws/request.Request" representing the +// client's request for the RejectVpcPeeringConnection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RejectVpcPeeringConnection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RejectVpcPeeringConnectionRequest method. +// req, resp := client.RejectVpcPeeringConnectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RejectVpcPeeringConnectionRequest(input *RejectVpcPeeringConnectionInput) (req *request.Request, output *RejectVpcPeeringConnectionOutput) { op := &request.Operation{ Name: opRejectVpcPeeringConnection, @@ -6286,7 +10487,28 @@ func (c *EC2) RejectVpcPeeringConnection(input *RejectVpcPeeringConnectionInput) const opReleaseAddress = "ReleaseAddress" -// ReleaseAddressRequest generates a request for the ReleaseAddress operation. +// ReleaseAddressRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReleaseAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReleaseAddressRequest method. +// req, resp := client.ReleaseAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReleaseAddressRequest(input *ReleaseAddressInput) (req *request.Request, output *ReleaseAddressOutput) { op := &request.Operation{ Name: opReleaseAddress, @@ -6329,7 +10551,28 @@ func (c *EC2) ReleaseAddress(input *ReleaseAddressInput) (*ReleaseAddressOutput, const opReleaseHosts = "ReleaseHosts" -// ReleaseHostsRequest generates a request for the ReleaseHosts operation. +// ReleaseHostsRequest generates a "aws/request.Request" representing the +// client's request for the ReleaseHosts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReleaseHosts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ReleaseHostsRequest method. +// req, resp := client.ReleaseHostsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReleaseHostsRequest(input *ReleaseHostsInput) (req *request.Request, output *ReleaseHostsOutput) { op := &request.Operation{ Name: opReleaseHosts, @@ -6367,7 +10610,28 @@ func (c *EC2) ReleaseHosts(input *ReleaseHostsInput) (*ReleaseHostsOutput, error const opReplaceNetworkAclAssociation = "ReplaceNetworkAclAssociation" -// ReplaceNetworkAclAssociationRequest generates a request for the ReplaceNetworkAclAssociation operation. +// ReplaceNetworkAclAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceNetworkAclAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceNetworkAclAssociationRequest method. +// req, resp := client.ReplaceNetworkAclAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReplaceNetworkAclAssociationRequest(input *ReplaceNetworkAclAssociationInput) (req *request.Request, output *ReplaceNetworkAclAssociationOutput) { op := &request.Operation{ Name: opReplaceNetworkAclAssociation, @@ -6397,7 +10661,28 @@ func (c *EC2) ReplaceNetworkAclAssociation(input *ReplaceNetworkAclAssociationIn const opReplaceNetworkAclEntry = "ReplaceNetworkAclEntry" -// ReplaceNetworkAclEntryRequest generates a request for the ReplaceNetworkAclEntry operation. +// ReplaceNetworkAclEntryRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceNetworkAclEntry operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceNetworkAclEntry method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceNetworkAclEntryRequest method. 
+// req, resp := client.ReplaceNetworkAclEntryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReplaceNetworkAclEntryRequest(input *ReplaceNetworkAclEntryInput) (req *request.Request, output *ReplaceNetworkAclEntryOutput) { op := &request.Operation{ Name: opReplaceNetworkAclEntry, @@ -6428,7 +10713,28 @@ func (c *EC2) ReplaceNetworkAclEntry(input *ReplaceNetworkAclEntryInput) (*Repla const opReplaceRoute = "ReplaceRoute" -// ReplaceRouteRequest generates a request for the ReplaceRoute operation. +// ReplaceRouteRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRoute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceRoute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceRouteRequest method. +// req, resp := client.ReplaceRouteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReplaceRouteRequest(input *ReplaceRouteInput) (req *request.Request, output *ReplaceRouteOutput) { op := &request.Operation{ Name: opReplaceRoute, @@ -6462,7 +10768,28 @@ func (c *EC2) ReplaceRoute(input *ReplaceRouteInput) (*ReplaceRouteOutput, error const opReplaceRouteTableAssociation = "ReplaceRouteTableAssociation" -// ReplaceRouteTableAssociationRequest generates a request for the ReplaceRouteTableAssociation operation. +// ReplaceRouteTableAssociationRequest generates a "aws/request.Request" representing the +// client's request for the ReplaceRouteTableAssociation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReplaceRouteTableAssociation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReplaceRouteTableAssociationRequest method. +// req, resp := client.ReplaceRouteTableAssociationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReplaceRouteTableAssociationRequest(input *ReplaceRouteTableAssociationInput) (req *request.Request, output *ReplaceRouteTableAssociationOutput) { op := &request.Operation{ Name: opReplaceRouteTableAssociation, @@ -6497,7 +10824,28 @@ func (c *EC2) ReplaceRouteTableAssociation(input *ReplaceRouteTableAssociationIn const opReportInstanceStatus = "ReportInstanceStatus" -// ReportInstanceStatusRequest generates a request for the ReportInstanceStatus operation. 
+// ReportInstanceStatusRequest generates a "aws/request.Request" representing the +// client's request for the ReportInstanceStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReportInstanceStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReportInstanceStatusRequest method. +// req, resp := client.ReportInstanceStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ReportInstanceStatusRequest(input *ReportInstanceStatusInput) (req *request.Request, output *ReportInstanceStatusOutput) { op := &request.Operation{ Name: opReportInstanceStatus, @@ -6532,7 +10880,28 @@ func (c *EC2) ReportInstanceStatus(input *ReportInstanceStatusInput) (*ReportIns const opRequestSpotFleet = "RequestSpotFleet" -// RequestSpotFleetRequest generates a request for the RequestSpotFleet operation. +// RequestSpotFleetRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotFleet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestSpotFleet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestSpotFleetRequest method. +// req, resp := client.RequestSpotFleetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RequestSpotFleetRequest(input *RequestSpotFleetInput) (req *request.Request, output *RequestSpotFleetOutput) { op := &request.Operation{ Name: opRequestSpotFleet, @@ -6575,7 +10944,28 @@ func (c *EC2) RequestSpotFleet(input *RequestSpotFleetInput) (*RequestSpotFleetO const opRequestSpotInstances = "RequestSpotInstances" -// RequestSpotInstancesRequest generates a request for the RequestSpotInstances operation. +// RequestSpotInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RequestSpotInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestSpotInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
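The boilerplate above repeats one contract for every generated operation: the *Request method only builds a *request.Request, nothing goes on the wire until Send is called, and the gap between the two is where a custom handler can be attached. A minimal sketch of that lifecycle, assuming a configured aws-sdk-go session; the Spot price, AMI ID, and instance type are placeholders, not values from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Build the request without sending it.
	req, resp := svc.RequestSpotInstancesRequest(&ec2.RequestSpotInstancesInput{
		SpotPrice:     aws.String("0.05"), // illustrative bid
		InstanceCount: aws.Int64(1),
		LaunchSpecification: &ec2.RequestSpotLaunchSpecification{
			ImageId:      aws.String("ami-12345678"), // placeholder AMI
			InstanceType: aws.String("t2.micro"),
		},
	})

	// One way to inject custom logic into the request lifecycle before it is sent.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s", r.Operation.Name)
	})

	// Nothing executes until Send is called.
	if err := req.Send(); err == nil { // resp is now filled
		fmt.Println(resp)
	}
}

Calling RequestSpotInstances directly is equivalent to the request-plus-Send sequence above, minus the custom handler.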
+// +// // Example sending a request using the RequestSpotInstancesRequest method. +// req, resp := client.RequestSpotInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RequestSpotInstancesRequest(input *RequestSpotInstancesInput) (req *request.Request, output *RequestSpotInstancesOutput) { op := &request.Operation{ Name: opRequestSpotInstances, @@ -6607,7 +10997,28 @@ func (c *EC2) RequestSpotInstances(input *RequestSpotInstancesInput) (*RequestSp const opResetImageAttribute = "ResetImageAttribute" -// ResetImageAttributeRequest generates a request for the ResetImageAttribute operation. +// ResetImageAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetImageAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetImageAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetImageAttributeRequest method. +// req, resp := client.ResetImageAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ResetImageAttributeRequest(input *ResetImageAttributeInput) (req *request.Request, output *ResetImageAttributeOutput) { op := &request.Operation{ Name: opResetImageAttribute, @@ -6638,7 +11049,28 @@ func (c *EC2) ResetImageAttribute(input *ResetImageAttributeInput) (*ResetImageA const opResetInstanceAttribute = "ResetInstanceAttribute" -// ResetInstanceAttributeRequest generates a request for the ResetInstanceAttribute operation. +// ResetInstanceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetInstanceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetInstanceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetInstanceAttributeRequest method. +// req, resp := client.ResetInstanceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) (req *request.Request, output *ResetInstanceAttributeOutput) { op := &request.Operation{ Name: opResetInstanceAttribute, @@ -6659,10 +11091,10 @@ func (c *EC2) ResetInstanceAttributeRequest(input *ResetInstanceAttributeInput) } // Resets an attribute of an instance to its default value. To reset the kernel -// or ramdisk, the instance must be in a stopped state. 
To reset the SourceDestCheck, +// or ramdisk, the instance must be in a stopped state. To reset the sourceDestCheck, // the instance can be either running or stopped. // -// The SourceDestCheck attribute controls whether source/destination checking +// The sourceDestCheck attribute controls whether source/destination checking // is enabled. The default value is true, which means checking is enabled. This // value must be false for a NAT instance to perform NAT. For more information, // see NAT Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html) @@ -6675,7 +11107,28 @@ func (c *EC2) ResetInstanceAttribute(input *ResetInstanceAttributeInput) (*Reset const opResetNetworkInterfaceAttribute = "ResetNetworkInterfaceAttribute" -// ResetNetworkInterfaceAttributeRequest generates a request for the ResetNetworkInterfaceAttribute operation. +// ResetNetworkInterfaceAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetNetworkInterfaceAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetNetworkInterfaceAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetNetworkInterfaceAttributeRequest method. +// req, resp := client.ResetNetworkInterfaceAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ResetNetworkInterfaceAttributeRequest(input *ResetNetworkInterfaceAttributeInput) (req *request.Request, output *ResetNetworkInterfaceAttributeOutput) { op := &request.Operation{ Name: opResetNetworkInterfaceAttribute, @@ -6705,7 +11158,28 @@ func (c *EC2) ResetNetworkInterfaceAttribute(input *ResetNetworkInterfaceAttribu const opResetSnapshotAttribute = "ResetSnapshotAttribute" -// ResetSnapshotAttributeRequest generates a request for the ResetSnapshotAttribute operation. +// ResetSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ResetSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetSnapshotAttributeRequest method. 
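Resetting sourceDestCheck, as described above, simply restores the attribute to its default of true; a NAT instance would need it disabled again afterwards. A short sketch, reusing the svc client from the earlier sketch, a placeholder instance ID, and the SDK's attribute-name constant (assumed unchanged by this patch):

// Reset sourceDestCheck back to its default value (true). For this particular
// attribute the instance may be either running or stopped.
_, err := svc.ResetInstanceAttribute(&ec2.ResetInstanceAttributeInput{
	InstanceId: aws.String("i-1234567890abcdef0"), // placeholder instance ID
	Attribute:  aws.String(ec2.InstanceAttributeNameSourceDestCheck),
})
if err != nil {
	log.Fatal(err)
}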
+// req, resp := client.ResetSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) ResetSnapshotAttributeRequest(input *ResetSnapshotAttributeInput) (req *request.Request, output *ResetSnapshotAttributeOutput) { op := &request.Operation{ Name: opResetSnapshotAttribute, @@ -6738,7 +11212,28 @@ func (c *EC2) ResetSnapshotAttribute(input *ResetSnapshotAttributeInput) (*Reset const opRestoreAddressToClassic = "RestoreAddressToClassic" -// RestoreAddressToClassicRequest generates a request for the RestoreAddressToClassic operation. +// RestoreAddressToClassicRequest generates a "aws/request.Request" representing the +// client's request for the RestoreAddressToClassic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreAddressToClassic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreAddressToClassicRequest method. +// req, resp := client.RestoreAddressToClassicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput) (req *request.Request, output *RestoreAddressToClassicOutput) { op := &request.Operation{ Name: opRestoreAddressToClassic, @@ -6759,9 +11254,7 @@ func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput // Restores an Elastic IP address that was previously moved to the EC2-VPC platform // back to the EC2-Classic platform. You cannot move an Elastic IP address that // was originally allocated for use in EC2-VPC. The Elastic IP address must -// not be associated with an instance or network interface. You cannot restore -// an Elastic IP address that's associated with a reverse DNS record. Contact -// AWS account and billing support to remove the reverse DNS record. +// not be associated with an instance or network interface. func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { req, out := c.RestoreAddressToClassicRequest(input) err := req.Send() @@ -6770,7 +11263,28 @@ func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*Res const opRevokeSecurityGroupEgress = "RevokeSecurityGroupEgress" -// RevokeSecurityGroupEgressRequest generates a request for the RevokeSecurityGroupEgress operation. +// RevokeSecurityGroupEgressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupEgress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSecurityGroupEgress method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSecurityGroupEgressRequest method. +// req, resp := client.RevokeSecurityGroupEgressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RevokeSecurityGroupEgressRequest(input *RevokeSecurityGroupEgressInput) (req *request.Request, output *RevokeSecurityGroupEgressOutput) { op := &request.Operation{ Name: opRevokeSecurityGroupEgress, @@ -6810,7 +11324,28 @@ func (c *EC2) RevokeSecurityGroupEgress(input *RevokeSecurityGroupEgressInput) ( const opRevokeSecurityGroupIngress = "RevokeSecurityGroupIngress" -// RevokeSecurityGroupIngressRequest generates a request for the RevokeSecurityGroupIngress operation. +// RevokeSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSecurityGroupIngressRequest method. +// req, resp := client.RevokeSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RevokeSecurityGroupIngressRequest(input *RevokeSecurityGroupIngressInput) (req *request.Request, output *RevokeSecurityGroupIngressOutput) { op := &request.Operation{ Name: opRevokeSecurityGroupIngress, @@ -6849,7 +11384,28 @@ func (c *EC2) RevokeSecurityGroupIngress(input *RevokeSecurityGroupIngressInput) const opRunInstances = "RunInstances" -// RunInstancesRequest generates a request for the RunInstances operation. +// RunInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunInstancesRequest method. +// req, resp := client.RunInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Request, output *Reservation) { op := &request.Operation{ Name: opRunInstances, @@ -6878,6 +11434,10 @@ func (c *EC2) RunInstancesRequest(input *RunInstancesInput) (req *request.Reques // batches. 
For example, create five separate launch requests for 100 instances // each instead of one launch request for 500 instances. // +// To tag your instance, ensure that it is running as CreateTags requires a +// resource ID. For more information about tagging, see Tagging Your Amazon +// EC2 Resources (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html). +// // If you don't specify a security group when launching an instance, Amazon // EC2 uses the default security group. For more information, see Security Groups // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html) @@ -6918,7 +11478,28 @@ func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { const opRunScheduledInstances = "RunScheduledInstances" -// RunScheduledInstancesRequest generates a request for the RunScheduledInstances operation. +// RunScheduledInstancesRequest generates a "aws/request.Request" representing the +// client's request for the RunScheduledInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunScheduledInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunScheduledInstancesRequest method. +// req, resp := client.RunScheduledInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { op := &request.Operation{ Name: opRunScheduledInstances, @@ -6955,7 +11536,28 @@ func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunSche const opStartInstances = "StartInstances" -// StartInstancesRequest generates a request for the StartInstances operation. +// StartInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StartInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartInstancesRequest method. 
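The tagging note above is the part people trip over: CreateTags needs a resource ID, and an instance ID only exists once RunInstances has returned its Reservation. A sketch under the same assumptions as the earlier example (svc client, placeholder AMI):

// Launch one instance; RunInstances returns a Reservation.
res, err := svc.RunInstances(&ec2.RunInstancesInput{
	ImageId:      aws.String("ami-12345678"), // placeholder AMI
	InstanceType: aws.String("t2.micro"),
	MinCount:     aws.Int64(1),
	MaxCount:     aws.Int64(1),
})
if err != nil {
	log.Fatal(err)
}

// Tag the instance after launch, now that an instance ID exists.
_, err = svc.CreateTags(&ec2.CreateTagsInput{
	Resources: []*string{res.Instances[0].InstanceId},
	Tags:      []*ec2.Tag{{Key: aws.String("Name"), Value: aws.String("example")}},
})
if err != nil {
	log.Fatal(err)
}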
+// req, resp := client.StartInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) StartInstancesRequest(input *StartInstancesInput) (req *request.Request, output *StartInstancesOutput) { op := &request.Operation{ Name: opStartInstances, @@ -7000,7 +11602,28 @@ func (c *EC2) StartInstances(input *StartInstancesInput) (*StartInstancesOutput, const opStopInstances = "StopInstances" -// StopInstancesRequest generates a request for the StopInstances operation. +// StopInstancesRequest generates a "aws/request.Request" representing the +// client's request for the StopInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopInstancesRequest method. +// req, resp := client.StopInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Request, output *StopInstancesOutput) { op := &request.Operation{ Name: opStopInstances, @@ -7043,8 +11666,10 @@ func (c *EC2) StopInstancesRequest(input *StopInstancesInput) (req *request.Requ // and terminating instances, see Instance Lifecycle (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-lifecycle.html) // in the Amazon Elastic Compute Cloud User Guide. // -// For more information about troubleshooting, see Troubleshooting Stopping -// Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) +// When you stop an instance, we attempt to shut it down forcibly after a short +// while. If your instance appears stuck in the stopping state after a period +// of time, there may be an issue with the underlying host computer. For more +// information, see Troubleshooting Stopping Your Instance (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/TroubleshootingInstancesStopping.html) // in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, error) { req, out := c.StopInstancesRequest(input) @@ -7054,7 +11679,28 @@ func (c *EC2) StopInstances(input *StopInstancesInput) (*StopInstancesOutput, er const opTerminateInstances = "TerminateInstances" -// TerminateInstancesRequest generates a request for the TerminateInstances operation. +// TerminateInstancesRequest generates a "aws/request.Request" representing the +// client's request for the TerminateInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the TerminateInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateInstancesRequest method. +// req, resp := client.TerminateInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) TerminateInstancesRequest(input *TerminateInstancesInput) (req *request.Request, output *TerminateInstancesOutput) { op := &request.Operation{ Name: opTerminateInstances, @@ -7102,7 +11748,28 @@ func (c *EC2) TerminateInstances(input *TerminateInstancesInput) (*TerminateInst const opUnassignPrivateIpAddresses = "UnassignPrivateIpAddresses" -// UnassignPrivateIpAddressesRequest generates a request for the UnassignPrivateIpAddresses operation. +// UnassignPrivateIpAddressesRequest generates a "aws/request.Request" representing the +// client's request for the UnassignPrivateIpAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnassignPrivateIpAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnassignPrivateIpAddressesRequest method. +// req, resp := client.UnassignPrivateIpAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) UnassignPrivateIpAddressesRequest(input *UnassignPrivateIpAddressesInput) (req *request.Request, output *UnassignPrivateIpAddressesOutput) { op := &request.Operation{ Name: opUnassignPrivateIpAddresses, @@ -7131,7 +11798,28 @@ func (c *EC2) UnassignPrivateIpAddresses(input *UnassignPrivateIpAddressesInput) const opUnmonitorInstances = "UnmonitorInstances" -// UnmonitorInstancesRequest generates a request for the UnmonitorInstances operation. +// UnmonitorInstancesRequest generates a "aws/request.Request" representing the +// client's request for the UnmonitorInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnmonitorInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnmonitorInstancesRequest method. 
+// req, resp := client.UnmonitorInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EC2) UnmonitorInstancesRequest(input *UnmonitorInstancesInput) (req *request.Request, output *UnmonitorInstancesOutput) { op := &request.Operation{ Name: opUnmonitorInstances, @@ -7985,11 +12673,11 @@ func (s AttachVpnGatewayOutput) GoString() string { return s.String() } -// The value to use when a resource attribute accepts a Boolean value. +// Describes a value for a resource attribute that is a Boolean value. type AttributeBooleanValue struct { _ struct{} `type:"structure"` - // Valid values are true or false. + // The attribute value. The valid values are true or false. Value *bool `locationName:"value" type:"boolean"` } @@ -8003,11 +12691,11 @@ func (s AttributeBooleanValue) GoString() string { return s.String() } -// The value to use for a resource attribute. +// Describes a value for a resource attribute that is a String. type AttributeValue struct { _ struct{} `type:"structure"` - // Valid values are case-sensitive and vary by action. + // The attribute value. Note that the value is case-sensitive. Value *string `locationName:"value" type:"string"` } @@ -9174,7 +13862,7 @@ type CopySnapshotInput struct { // copy operation. This parameter is only valid for specifying the destination // region in a PresignedUrl parameter, where it is required. // - // CopySnapshot sends the snapshot copy to the regional endpoint that you + // CopySnapshot sends the snapshot copy to the regional endpoint that you // send the HTTP request to, such as ec2.us-east-1.amazonaws.com (in the AWS // CLI, this is specified with the --region parameter or the default region // in your AWS configuration file). @@ -9186,13 +13874,13 @@ type CopySnapshotInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // Specifies whether the destination snapshot should be encrypted. There is - // no way to create an unencrypted snapshot copy from an encrypted snapshot; - // however, you can encrypt a copy of an unencrypted snapshot with this flag. - // The default CMK for EBS is used unless a non-default AWS Key Management Service - // (AWS KMS) CMK is specified with KmsKeyId. For more information, see Amazon - // EBS Encryption (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) - // in the Amazon Elastic Compute Cloud User Guide. + // Specifies whether the destination snapshot should be encrypted. You can encrypt + // a copy of an unencrypted snapshot using this flag, but you cannot use it + // to create an unencrypted copy from an encrypted snapshot. Your default CMK + // for EBS is used unless a non-default AWS Key Management Service (AWS KMS) + // CMK is specified with KmsKeyId. For more information, see Amazon EBS Encryption + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) in + // the Amazon Elastic Compute Cloud User Guide. Encrypted *bool `locationName:"encrypted" type:"boolean"` // The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when @@ -9411,6 +14099,8 @@ type CreateFlowLogsInput struct { LogGroupName *string `type:"string" required:"true"` // One or more subnet, network interface, or VPC IDs. + // + // Constraints: Maximum of 1000 resources ResourceIds []*string `locationName:"ResourceId" locationNameList:"item" type:"list" required:"true"` // The type of resource on which to create the flow log. 
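The reworded CopySnapshot text above is directional: an unencrypted snapshot can be copied into an encrypted one, but an encrypted snapshot can never be copied back to unencrypted. A sketch of the allowed direction, with placeholder IDs and the same svc client as before; omitting KmsKeyId falls back to the default EBS CMK:

// Copy an unencrypted snapshot and encrypt the copy with the default EBS CMK.
out, err := svc.CopySnapshot(&ec2.CopySnapshotInput{
	SourceRegion:     aws.String("us-east-1"),
	SourceSnapshotId: aws.String("snap-1234567890abcdef0"), // placeholder snapshot ID
	Encrypted:        aws.Bool(true),                       // the reverse direction is not allowed
	Description:      aws.String("encrypted copy"),
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(aws.StringValue(out.SnapshotId))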
@@ -9821,7 +14511,8 @@ type CreateNetworkAclEntryInput struct { // The rule number for the entry (for example, 100). ACL entries are processed // in ascending order by rule number. // - // Constraints: Positive integer from 1 to 32766 + // Constraints: Positive integer from 1 to 32766. The range 32767 to 65535 + // is reserved for internal use. RuleNumber *int64 `locationName:"ruleNumber" type:"integer" required:"true"` } @@ -10299,7 +14990,7 @@ type CreateSecurityGroupInput struct { // // Constraints for EC2-Classic: ASCII characters // - // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* Description *string `locationName:"GroupDescription" type:"string" required:"true"` // Checks whether you have the required permissions for the action, without @@ -10314,7 +15005,7 @@ type CreateSecurityGroupInput struct { // // Constraints for EC2-Classic: ASCII characters // - // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=;{}!$* + // Constraints for EC2-VPC: a-z, A-Z, 0-9, spaces, and ._-:/()#,@[]+=&;{}!$* GroupName *string `type:"string" required:"true"` // [EC2-VPC] The ID of the VPC. Required for EC2-VPC. @@ -10741,7 +15432,7 @@ type CreateVpcEndpointInput struct { // One or more route table IDs. RouteTableIds []*string `locationName:"RouteTableId" locationNameList:"item" type:"list"` - // The AWS service name, in the form com.amazonaws.region.service. To get a + // The AWS service name, in the form com.amazonaws.region.service . To get a // list of available services, use the DescribeVpcEndpointServices request. ServiceName *string `type:"string" required:"true"` @@ -10816,7 +15507,7 @@ type CreateVpcInput struct { // as dedicated tenancy instances by default. You can only launch instances // with a tenancy of dedicated or host into a dedicated tenancy VPC. // - // Important: The host value cannot be used with this parameter. Use the default + // Important: The host value cannot be used with this parameter. Use the default // or dedicated values only. // // Default: default @@ -12470,25 +17161,25 @@ type DescribeAddressesInput struct { // One or more filters. Filter names and values are case-sensitive. // - // allocation-id - [EC2-VPC] The allocation ID for the address. + // allocation-id - [EC2-VPC] The allocation ID for the address. // - // association-id - [EC2-VPC] The association ID for the address. + // association-id - [EC2-VPC] The association ID for the address. // - // domain - Indicates whether the address is for use in EC2-Classic (standard) + // domain - Indicates whether the address is for use in EC2-Classic (standard) // or in a VPC (vpc). // - // instance-id - The ID of the instance the address is associated with, if - // any. + // instance-id - The ID of the instance the address is associated with, + // if any. // - // network-interface-id - [EC2-VPC] The ID of the network interface that + // network-interface-id - [EC2-VPC] The ID of the network interface that // the address is associated with, if any. // - // network-interface-owner-id - The AWS account ID of the owner. + // network-interface-owner-id - The AWS account ID of the owner. // - // private-ip-address - [EC2-VPC] The private IP address associated with + // private-ip-address - [EC2-VPC] The private IP address associated with // the Elastic IP address. // - // public-ip - The Elastic IP address. + // public-ip - The Elastic IP address. 
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // [EC2-Classic] One or more Elastic IP addresses. @@ -12537,15 +17228,15 @@ type DescribeAvailabilityZonesInput struct { // One or more filters. // - // message - Information about the Availability Zone. + // message - Information about the Availability Zone. // - // region-name - The name of the region for the Availability Zone (for example, + // region-name - The name of the region for the Availability Zone (for example, // us-east-1). // - // state - The state of the Availability Zone (available | information | + // state - The state of the Availability Zone (available | information | // impaired | unavailable). // - // zone-name - The name of the Availability Zone (for example, us-east-1a). + // zone-name - The name of the Availability Zone (for example, us-east-1a). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The names of one or more Availability Zones. @@ -12597,27 +17288,27 @@ type DescribeBundleTasksInput struct { // One or more filters. // - // bundle-id - The ID of the bundle task. + // bundle-id - The ID of the bundle task. // - // error-code - If the task failed, the error code returned. + // error-code - If the task failed, the error code returned. // - // error-message - If the task failed, the error message returned. + // error-message - If the task failed, the error message returned. // - // instance-id - The ID of the instance. + // instance-id - The ID of the instance. // - // progress - The level of task completion, as a percentage (for example, + // progress - The level of task completion, as a percentage (for example, // 20%). // - // s3-bucket - The Amazon S3 bucket to store the AMI. + // s3-bucket - The Amazon S3 bucket to store the AMI. // - // s3-prefix - The beginning of the AMI name. + // s3-prefix - The beginning of the AMI name. // - // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). + // start-time - The time the task started (for example, 2013-09-15T17:15:20.000Z). // - // state - The state of the task (pending | waiting-for-shutdown | bundling + // state - The state of the task (pending | waiting-for-shutdown | bundling // | storing | cancelling | complete | failed). // - // update-time - The time of the most recent update for the task. + // update-time - The time of the most recent update for the task. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -12661,23 +17352,24 @@ type DescribeClassicLinkInstancesInput struct { // One or more filters. // - // group-id - The ID of a VPC security group that's associated with the instance. + // group-id - The ID of a VPC security group that's associated with the + // instance. // - // instance-id - The ID of the instance. + // instance-id - The ID of the instance. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). 
If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-id - The ID of the VPC that the instance is linked to. + // vpc-id - The ID of the VPC that the instance is linked to. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more instance IDs. Must be instances linked to a VPC through ClassicLink. @@ -12791,31 +17483,31 @@ type DescribeCustomerGatewaysInput struct { // One or more filters. // - // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous + // bgp-asn - The customer gateway's Border Gateway Protocol (BGP) Autonomous // System Number (ASN). // - // customer-gateway-id - The ID of the customer gateway. + // customer-gateway-id - The ID of the customer gateway. // - // ip-address - The IP address of the customer gateway's Internet-routable + // ip-address - The IP address of the customer gateway's Internet-routable // external interface. // - // state - The state of the customer gateway (pending | available | deleting + // state - The state of the customer gateway (pending | available | deleting // | deleted). // - // type - The type of customer gateway. Currently, the only supported type + // type - The type of customer gateway. Currently, the only supported type // is ipsec.1. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -12864,23 +17556,23 @@ type DescribeDhcpOptionsInput struct { // One or more filters. // - // dhcp-options-id - The ID of a set of DHCP options. + // dhcp-options-id - The ID of a set of DHCP options. // - // key - The key for one of the options (for example, domain-name). + // key - The key for one of the options (for example, domain-name). // - // value - The value for one of the options. + // value - The value for one of the options. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. 
For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` } @@ -12954,15 +17646,15 @@ type DescribeFlowLogsInput struct { // One or more filters. // - // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). + // deliver-log-status - The status of the logs delivery (SUCCESS | FAILED). // - // flow-log-id - The ID of the flow log. + // flow-log-id - The ID of the flow log. // - // log-group-name - The name of the log group. + // log-group-name - The name of the log group. // - // resource-id - The ID of the VPC, subnet, or network interface. + // resource-id - The ID of the VPC, subnet, or network interface. // - // traffic-type - The type of traffic (ACCEPT | REJECT | ALL) + // traffic-type - The type of traffic (ACCEPT | REJECT | ALL) Filter []*Filter `locationNameList:"Filter" type:"list"` // One or more flow log IDs. @@ -13017,20 +17709,22 @@ type DescribeHostsInput struct { // One or more filters. // - // instance-type - The instance type size that the Dedicated host is configured + // instance-type - The instance type size that the Dedicated host is configured // to support. // - // auto-placement - Whether auto-placement is enabled or disabled (on | off). + // auto-placement - Whether auto-placement is enabled or disabled (on | + // off). // - // host-reservation-id - The ID of the reservation associated with this host. + // host-reservation-id - The ID of the reservation associated with this + // host. // - // client-token - The idempotency token you provided when you launched the + // client-token - The idempotency token you provided when you launched the // instance // - // state- The allocation state of the Dedicated host (available | under-assessment + // state- The allocation state of the Dedicated host (available | under-assessment // | permanent-failure | released | released-permanent-failure). // - // availability-zone - The Availability Zone of the host. + // availability-zone - The Availability Zone of the host. Filter []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` // The IDs of the Dedicated hosts. The IDs are used for targeted instance launches. @@ -13115,13 +17809,66 @@ func (s DescribeIdFormatOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeIdentityIdFormat. +type DescribeIdentityIdFormatInput struct { + _ struct{} `type:"structure"` + + // The ARN of the principal, which can be an IAM role, IAM user, or the root + // user. + PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` + + // The type of resource. 
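The new DescribeIdentityIdFormatInput above validates only that PrincipalArn is present; Resource stays optional. A minimal call, assuming the corresponding DescribeIdentityIdFormat method added alongside these types, the svc client from earlier, and an illustrative IAM user ARN:

out, err := svc.DescribeIdentityIdFormat(&ec2.DescribeIdentityIdFormatInput{
	PrincipalArn: aws.String("arn:aws:iam::123456789012:user/example"), // placeholder ARN
	Resource:     aws.String("instance"),
})
if err != nil {
	log.Fatal(err)
}
for _, s := range out.Statuses {
	fmt.Println(aws.StringValue(s.Resource), aws.BoolValue(s.UseLongIds))
}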
+ Resource *string `locationName:"resource" type:"string"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeIdentityIdFormat. +type DescribeIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` + + // Information about the ID format for the resources. + Statuses []*IdFormat `locationName:"statusSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeIdentityIdFormatOutput) GoString() string { + return s.String() +} + // Contains the parameters for DescribeImageAttribute. type DescribeImageAttributeInput struct { _ struct{} `type:"structure"` // The AMI attribute. // - // Note: Depending on your account privileges, the blockDeviceMapping attribute + // Note: Depending on your account privileges, the blockDeviceMapping attribute // may return a Client.AuthFailure error. If this happens, use DescribeImages // to get information about the block device mapping for the AMI. Attribute *string `type:"string" required:"true" enum:"ImageAttributeName"` @@ -13187,7 +17934,8 @@ type DescribeImageAttributeOutput struct { // The RAM disk ID. RamdiskId *AttributeValue `locationName:"ramdisk" type:"structure"` - // The value to use for a resource attribute. + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` } @@ -13217,74 +17965,74 @@ type DescribeImagesInput struct { // One or more filters. // - // architecture - The image architecture (i386 | x86_64). + // architecture - The image architecture (i386 | x86_64). // - // block-device-mapping.delete-on-termination - A Boolean value that indicates + // block-device-mapping.delete-on-termination - A Boolean value that indicates // whether the Amazon EBS volume is deleted on instance termination. // - // block-device-mapping.device-name - The device name for the EBS volume + // block-device-mapping.device-name - The device name for the EBS volume // (for example, /dev/sdh). // - // block-device-mapping.snapshot-id - The ID of the snapshot used for the + // block-device-mapping.snapshot-id - The ID of the snapshot used for the // EBS volume. // - // block-device-mapping.volume-size - The volume size of the EBS volume, + // block-device-mapping.volume-size - The volume size of the EBS volume, // in GiB. // - // block-device-mapping.volume-type - The volume type of the EBS volume (gp2 - // | io1 | st1 | sc1 | standard). + // block-device-mapping.volume-type - The volume type of the EBS volume + // (gp2 | io1 | st1 | sc1 | standard). // - // description - The description of the image (provided during image creation). 
+ // description - The description of the image (provided during image creation). // - // hypervisor - The hypervisor type (ovm | xen). + // hypervisor - The hypervisor type (ovm | xen). // - // image-id - The ID of the image. + // image-id - The ID of the image. // - // image-type - The image type (machine | kernel | ramdisk). + // image-type - The image type (machine | kernel | ramdisk). // - // is-public - A Boolean that indicates whether the image is public. + // is-public - A Boolean that indicates whether the image is public. // - // kernel-id - The kernel ID. + // kernel-id - The kernel ID. // - // manifest-location - The location of the image manifest. + // manifest-location - The location of the image manifest. // - // name - The name of the AMI (provided during image creation). + // name - The name of the AMI (provided during image creation). // - // owner-alias - The AWS account alias (for example, amazon). + // owner-alias - The AWS account alias (for example, amazon). // - // owner-id - The AWS account ID of the image owner. + // owner-id - The AWS account ID of the image owner. // - // platform - The platform. To only list Windows-based AMIs, use windows. + // platform - The platform. To only list Windows-based AMIs, use windows. // - // product-code - The product code. + // product-code - The product code. // - // product-code.type - The type of the product code (devpay | marketplace). + // product-code.type - The type of the product code (devpay | marketplace). // - // ramdisk-id - The RAM disk ID. + // ramdisk-id - The RAM disk ID. // - // root-device-name - The name of the root device volume (for example, /dev/sda1). + // root-device-name - The name of the root device volume (for example, /dev/sda1). // - // root-device-type - The type of the root device volume (ebs | instance-store). + // root-device-type - The type of the root device volume (ebs | instance-store). // - // state - The state of the image (available | pending | failed). + // state - The state of the image (available | pending | failed). // - // state-reason-code - The reason code for the state change. + // state-reason-code - The reason code for the state change. // - // state-reason-message - The message for the state change. + // state-reason-message - The message for the state change. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // virtualization-type - The virtualization type (paravirtual | hvm). + // virtualization-type - The virtualization type (paravirtual | hvm). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more image IDs. 
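The DescribeImages filter names listed above are passed as ec2.Filter values; several filters narrow the result together, and each filter accepts one or more values. A sketch restricted to Amazon-owned, EBS-backed HVM images, under the same svc assumption as earlier:

out, err := svc.DescribeImages(&ec2.DescribeImagesInput{
	Owners: []*string{aws.String("amazon")},
	Filters: []*ec2.Filter{
		{Name: aws.String("root-device-type"), Values: []*string{aws.String("ebs")}},
		{Name: aws.String("virtualization-type"), Values: []*string{aws.String("hvm")}},
	},
})
if err != nil {
	log.Fatal(err)
}
fmt.Println(len(out.Images), "images matched")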
@@ -13447,6 +18195,8 @@ type DescribeInstanceAttributeInput struct { _ struct{} `type:"structure"` // The instance attribute. + // + // Note: The enaSupport attribute is not supported at this time. Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` // Checks whether you have the required permissions for the action, without @@ -13499,6 +18249,9 @@ type DescribeInstanceAttributeOutput struct { // Indicates whether the instance is optimized for EBS I/O. EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + // Indicates whether enhanced networking with ENA is enabled. + EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + // The security groups associated with the instance. Groups []*GroupIdentifier `locationName:"groupSet" locationNameList:"item" type:"list"` @@ -13529,10 +18282,11 @@ type DescribeInstanceAttributeOutput struct { // must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `locationName:"sourceDestCheck" type:"structure"` - // The value to use for a resource attribute. + // Indicates whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` - // The Base64-encoded MIME user data. + // The user data. UserData *AttributeValue `locationName:"userData" type:"structure"` } @@ -13558,38 +18312,38 @@ type DescribeInstanceStatusInput struct { // One or more filters. // - // availability-zone - The Availability Zone of the instance. + // availability-zone - The Availability Zone of the instance. // - // event.code - The code for the scheduled event (instance-reboot | system-reboot + // event.code - The code for the scheduled event (instance-reboot | system-reboot // | system-maintenance | instance-retirement | instance-stop). // - // event.description - A description of the event. + // event.description - A description of the event. // - // event.not-after - The latest end time for the scheduled event (for example, + // event.not-after - The latest end time for the scheduled event (for example, // 2014-09-15T17:15:20.000Z). // - // event.not-before - The earliest start time for the scheduled event (for + // event.not-before - The earliest start time for the scheduled event (for // example, 2014-09-15T17:15:20.000Z). // - // instance-state-code - The code for the instance state, as a 16-bit unsigned + // instance-state-code - The code for the instance state, as a 16-bit unsigned // integer. The high byte is an opaque internal value and should be ignored. // The low byte is set based on the state represented. The valid values are // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), // and 80 (stopped). // - // instance-state-name - The state of the instance (pending | running | shutting-down - // | terminated | stopping | stopped). + // instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). // - // instance-status.reachability - Filters on instance status where the name + // instance-status.reachability - Filters on instance status where the name // is reachability (passed | failed | initializing | insufficient-data). // - // instance-status.status - The status of the instance (ok | impaired | initializing - // | insufficient-data | not-applicable). 
+ // instance-status.status - The status of the instance (ok | impaired | + // initializing | insufficient-data | not-applicable). // - // system-status.reachability - Filters on system status where the name is - // reachability (passed | failed | initializing | insufficient-data). + // system-status.reachability - Filters on system status where the name + // is reachability (passed | failed | initializing | insufficient-data). // - // system-status.status - The system status of the instance (ok | impaired + // system-status.status - The system status of the instance (ok | impaired // | initializing | insufficient-data | not-applicable). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -13660,233 +18414,234 @@ type DescribeInstancesInput struct { // One or more filters. // - // affinity - The affinity setting for an instance running on a Dedicated + // affinity - The affinity setting for an instance running on a Dedicated // host (default | host). // - // architecture - The instance architecture (i386 | x86_64). + // architecture - The instance architecture (i386 | x86_64). // - // availability-zone - The Availability Zone of the instance. + // availability-zone - The Availability Zone of the instance. // - // block-device-mapping.attach-time - The attach time for an EBS volume mapped - // to the instance, for example, 2010-09-15T17:15:20.000Z. + // block-device-mapping.attach-time - The attach time for an EBS volume + // mapped to the instance, for example, 2010-09-15T17:15:20.000Z. // - // block-device-mapping.delete-on-termination - A Boolean that indicates + // block-device-mapping.delete-on-termination - A Boolean that indicates // whether the EBS volume is deleted on instance termination. // - // block-device-mapping.device-name - The device name for the EBS volume + // block-device-mapping.device-name - The device name for the EBS volume // (for example, /dev/sdh or xvdh). // - // block-device-mapping.status - The status for the EBS volume (attaching + // block-device-mapping.status - The status for the EBS volume (attaching // | attached | detaching | detached). // - // block-device-mapping.volume-id - The volume ID of the EBS volume. + // block-device-mapping.volume-id - The volume ID of the EBS volume. // - // client-token - The idempotency token you provided when you launched the + // client-token - The idempotency token you provided when you launched the // instance. // - // dns-name - The public DNS name of the instance. + // dns-name - The public DNS name of the instance. // - // group-id - The ID of the security group for the instance. EC2-Classic + // group-id - The ID of the security group for the instance. EC2-Classic // only. // - // group-name - The name of the security group for the instance. EC2-Classic + // group-name - The name of the security group for the instance. EC2-Classic // only. // - // host-Id - The ID of the Dedicated host on which the instance is running, + // host-Id - The ID of the Dedicated host on which the instance is running, // if applicable. // - // hypervisor - The hypervisor type of the instance (ovm | xen). + // hypervisor - The hypervisor type of the instance (ovm | xen). // - // iam-instance-profile.arn - The instance profile associated with the instance. + // iam-instance-profile.arn - The instance profile associated with the instance. // Specified as an ARN. // - // image-id - The ID of the image used to launch the instance. + // image-id - The ID of the image used to launch the instance. 
// - // instance-id - The ID of the instance. + // instance-id - The ID of the instance. // - // instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled + // instance-lifecycle - Indicates whether this is a Spot Instance or a Scheduled // Instance (spot | scheduled). // - // instance-state-code - The state of the instance, as a 16-bit unsigned + // instance-state-code - The state of the instance, as a 16-bit unsigned // integer. The high byte is an opaque internal value and should be ignored. // The low byte is set based on the state represented. The valid values are: // 0 (pending), 16 (running), 32 (shutting-down), 48 (terminated), 64 (stopping), // and 80 (stopped). // - // instance-state-name - The state of the instance (pending | running | shutting-down - // | terminated | stopping | stopped). + // instance-state-name - The state of the instance (pending | running | + // shutting-down | terminated | stopping | stopped). // - // instance-type - The type of instance (for example, t2.micro). + // instance-type - The type of instance (for example, t2.micro). // - // instance.group-id - The ID of the security group for the instance. + // instance.group-id - The ID of the security group for the instance. // - // instance.group-name - The name of the security group for the instance. + // instance.group-name - The name of the security group for the instance. // - // ip-address - The public IP address of the instance. + // ip-address - The public IP address of the instance. // - // kernel-id - The kernel ID. + // kernel-id - The kernel ID. // - // key-name - The name of the key pair used when the instance was launched. + // key-name - The name of the key pair used when the instance was launched. // - // launch-index - When launching multiple instances, this is the index for + // launch-index - When launching multiple instances, this is the index for // the instance in the launch group (for example, 0, 1, 2, and so on). // - // launch-time - The time when the instance was launched. + // launch-time - The time when the instance was launched. // - // monitoring-state - Indicates whether monitoring is enabled for the instance + // monitoring-state - Indicates whether monitoring is enabled for the instance // (disabled | enabled). // - // owner-id - The AWS account ID of the instance owner. + // owner-id - The AWS account ID of the instance owner. // - // placement-group-name - The name of the placement group for the instance. + // placement-group-name - The name of the placement group for the instance. // - // platform - The platform. Use windows if you have Windows instances; otherwise, + // platform - The platform. Use windows if you have Windows instances; otherwise, // leave blank. // - // private-dns-name - The private DNS name of the instance. + // private-dns-name - The private DNS name of the instance. // - // private-ip-address - The private IP address of the instance. + // private-ip-address - The private IP address of the instance. // - // product-code - The product code associated with the AMI used to launch + // product-code - The product code associated with the AMI used to launch // the instance. // - // product-code.type - The type of product code (devpay | marketplace). + // product-code.type - The type of product code (devpay | marketplace). // - // ramdisk-id - The RAM disk ID. + // ramdisk-id - The RAM disk ID. 
// - // reason - The reason for the current state of the instance (for example, + // reason - The reason for the current state of the instance (for example, // shows "User Initiated [date]" when you stop or terminate the instance). Similar // to the state-reason-code filter. // - // requester-id - The ID of the entity that launched the instance on your + // requester-id - The ID of the entity that launched the instance on your // behalf (for example, AWS Management Console, Auto Scaling, and so on). // - // reservation-id - The ID of the instance's reservation. A reservation ID - // is created any time you launch an instance. A reservation ID has a one-to-one + // reservation-id - The ID of the instance's reservation. A reservation + // ID is created any time you launch an instance. A reservation ID has a one-to-one // relationship with an instance launch request, but can be associated with // more than one instance if you launch multiple instances using the same launch // request. For example, if you launch one instance, you'll get one reservation // ID. If you launch ten instances using the same launch request, you'll also // get one reservation ID. // - // root-device-name - The name of the root device for the instance (for example, - // /dev/sda1 or /dev/xvda). + // root-device-name - The name of the root device for the instance (for + // example, /dev/sda1 or /dev/xvda). // - // root-device-type - The type of root device that the instance uses (ebs + // root-device-type - The type of root device that the instance uses (ebs // | instance-store). // - // source-dest-check - Indicates whether the instance performs source/destination + // source-dest-check - Indicates whether the instance performs source/destination // checking. A value of true means that checking is enabled, and false means // checking is disabled. The value must be false for the instance to perform // network address translation (NAT) in your VPC. // - // spot-instance-request-id - The ID of the Spot instance request. + // spot-instance-request-id - The ID of the Spot instance request. // - // state-reason-code - The reason code for the state change. + // state-reason-code - The reason code for the state change. // - // state-reason-message - A message that describes the state change. + // state-reason-message - A message that describes the state change. // - // subnet-id - The ID of the subnet for the instance. + // subnet-id - The ID of the subnet for the instance. // - // tag:key=value - The key/value combination of a tag assigned to the resource, + // tag:key=value - The key/value combination of a tag assigned to the resource, // where tag:key is the tag's key. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // tenancy - The tenancy of an instance (dedicated | default | host). 
+ // tenancy - The tenancy of an instance (dedicated | default | host). // - // virtualization-type - The virtualization type of the instance (paravirtual + // virtualization-type - The virtualization type of the instance (paravirtual // | hvm). // - // vpc-id - The ID of the VPC that the instance is running in. + // vpc-id - The ID of the VPC that the instance is running in. // - // network-interface.description - The description of the network interface. + // network-interface.description - The description of the network interface. // - // network-interface.subnet-id - The ID of the subnet for the network interface. + // network-interface.subnet-id - The ID of the subnet for the network interface. // - // network-interface.vpc-id - The ID of the VPC for the network interface. + // network-interface.vpc-id - The ID of the VPC for the network interface. // - // network-interface.network-interface-id - The ID of the network interface. + // network-interface.network-interface-id - The ID of the network interface. // - // network-interface.owner-id - The ID of the owner of the network interface. + // network-interface.owner-id - The ID of the owner of the network interface. // - // network-interface.availability-zone - The Availability Zone for the network + // network-interface.availability-zone - The Availability Zone for the network // interface. // - // network-interface.requester-id - The requester ID for the network interface. + // network-interface.requester-id - The requester ID for the network interface. // - // network-interface.requester-managed - Indicates whether the network interface + // network-interface.requester-managed - Indicates whether the network interface // is being managed by AWS. // - // network-interface.status - The status of the network interface (available) + // network-interface.status - The status of the network interface (available) // | in-use). // - // network-interface.mac-address - The MAC address of the network interface. + // network-interface.mac-address - The MAC address of the network interface. // - // network-interface.private-dns-name - The private DNS name of the network + // network-interface.private-dns-name - The private DNS name of the network // interface. // - // network-interface.source-dest-check - Whether the network interface performs + // network-interface.source-dest-check - Whether the network interface performs // source/destination checking. A value of true means checking is enabled, and // false means checking is disabled. The value must be false for the network // interface to perform network address translation (NAT) in your VPC. // - // network-interface.group-id - The ID of a security group associated with + // network-interface.group-id - The ID of a security group associated with // the network interface. // - // network-interface.group-name - The name of a security group associated + // network-interface.group-name - The name of a security group associated // with the network interface. // - // network-interface.attachment.attachment-id - The ID of the interface attachment. + // network-interface.attachment.attachment-id - The ID of the interface + // attachment. // - // network-interface.attachment.instance-id - The ID of the instance to which - // the network interface is attached. + // network-interface.attachment.instance-id - The ID of the instance to + // which the network interface is attached. 
// - // network-interface.attachment.instance-owner-id - The owner ID of the instance - // to which the network interface is attached. + // network-interface.attachment.instance-owner-id - The owner ID of the + // instance to which the network interface is attached. // - // network-interface.addresses.private-ip-address - The private IP address + // network-interface.addresses.private-ip-address - The private IP address // associated with the network interface. // - // network-interface.attachment.device-index - The device index to which + // network-interface.attachment.device-index - The device index to which // the network interface is attached. // - // network-interface.attachment.status - The status of the attachment (attaching + // network-interface.attachment.status - The status of the attachment (attaching // | attached | detaching | detached). // - // network-interface.attachment.attach-time - The time that the network interface - // was attached to an instance. + // network-interface.attachment.attach-time - The time that the network + // interface was attached to an instance. // - // network-interface.attachment.delete-on-termination - Specifies whether + // network-interface.attachment.delete-on-termination - Specifies whether // the attachment is deleted when an instance is terminated. // - // network-interface.addresses.primary - Specifies whether the IP address + // network-interface.addresses.primary - Specifies whether the IP address // of the network interface is the primary private IP address. // - // network-interface.addresses.association.public-ip - The ID of the association + // network-interface.addresses.association.public-ip - The ID of the association // of an Elastic IP address with a network interface. // - // network-interface.addresses.association.ip-owner-id - The owner ID of + // network-interface.addresses.association.ip-owner-id - The owner ID of // the private IP address associated with the network interface. // - // association.public-ip - The address of the Elastic IP address bound to + // association.public-ip - The address of the Elastic IP address bound to // the network interface. // - // association.ip-owner-id - The owner of the Elastic IP address associated + // association.ip-owner-id - The owner of the Elastic IP address associated // with the network interface. // - // association.allocation-id - The allocation ID returned when you allocated + // association.allocation-id - The allocation ID returned when you allocated // the Elastic IP address for your network interface. // - // association.association-id - The association ID returned when the network + // association.association-id - The association ID returned when the network // interface was associated with an IP address. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -13898,7 +18653,7 @@ type DescribeInstancesInput struct { // The maximum number of results to return in a single call. To retrieve the // remaining results, make another call with the returned NextToken value. This // value can be between 5 and 1000. You cannot specify this parameter and the - // instance IDs parameter in the same call. + // instance IDs parameter or tag filters in the same call. MaxResults *int64 `locationName:"maxResults" type:"integer"` // The token to request the next page of results. @@ -13949,24 +18704,24 @@ type DescribeInternetGatewaysInput struct { // One or more filters. 
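The DescribeInstancesInput filter list reworded above is easier to follow next to a concrete call. A sketch only, reusing the client setup from the previous example; the tag key Purpose and value web are hypothetical.

// listRunningWebInstances assumes the same imports and *ec2.EC2 client
// construction as the DescribeInstanceAttribute sketch above.
func listRunningWebInstances(svc *ec2.EC2) error {
	// Two of the documented filters combined: instance state plus a tag.
	input := &ec2.DescribeInstancesInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("instance-state-name"), Values: []*string{aws.String("running")}},
			{Name: aws.String("tag:Purpose"), Values: []*string{aws.String("web")}},
		},
	}
	// DescribeInstancesPages follows NextToken automatically.
	return svc.DescribeInstancesPages(input, func(page *ec2.DescribeInstancesOutput, lastPage bool) bool {
		for _, r := range page.Reservations {
			for _, i := range r.Instances {
				fmt.Println(aws.StringValue(i.InstanceId))
			}
		}
		return true // request the next page
	})
}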
// - // attachment.state - The current state of the attachment between the gateway + // attachment.state - The current state of the attachment between the gateway // and the VPC (available). Present only if a VPC is attached. // - // attachment.vpc-id - The ID of an attached VPC. + // attachment.vpc-id - The ID of an attached VPC. // - // internet-gateway-id - The ID of the Internet gateway. + // internet-gateway-id - The ID of the Internet gateway. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more Internet gateway IDs. @@ -14015,9 +18770,9 @@ type DescribeKeyPairsInput struct { // One or more filters. // - // fingerprint - The fingerprint of the key pair. + // fingerprint - The fingerprint of the key pair. // - // key-name - The name of the key pair. + // key-name - The name of the key pair. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more key pair names. @@ -14066,7 +18821,7 @@ type DescribeMovingAddressesInput struct { // One or more filters. // - // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). + // moving-status - The status of the Elastic IP address (MovingToVpc | RestoringToClassic). Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return for the request in a single page. @@ -14122,14 +18877,14 @@ type DescribeNatGatewaysInput struct { // One or more filters. // - // nat-gateway-id - The ID of the NAT gateway. + // nat-gateway-id - The ID of the NAT gateway. // - // state - The state of the NAT gateway (pending | failed | available | deleting - // | deleted). + // state - The state of the NAT gateway (pending | failed | available | + // deleting | deleted). // - // subnet-id - The ID of the subnet in which the NAT gateway resides. + // subnet-id - The ID of the subnet in which the NAT gateway resides. // - // vpc-id - The ID of the VPC in which the NAT gateway resides. + // vpc-id - The ID of the VPC in which the NAT gateway resides. Filter []*Filter `locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. The request returns @@ -14191,51 +18946,52 @@ type DescribeNetworkAclsInput struct { // One or more filters. // - // association.association-id - The ID of an association ID for the ACL. + // association.association-id - The ID of an association ID for the ACL. 
// - // association.network-acl-id - The ID of the network ACL involved in the + // association.network-acl-id - The ID of the network ACL involved in the // association. // - // association.subnet-id - The ID of the subnet involved in the association. + // association.subnet-id - The ID of the subnet involved in the association. // - // default - Indicates whether the ACL is the default network ACL for the + // default - Indicates whether the ACL is the default network ACL for the // VPC. // - // entry.cidr - The CIDR range specified in the entry. + // entry.cidr - The CIDR range specified in the entry. // - // entry.egress - Indicates whether the entry applies to egress traffic. + // entry.egress - Indicates whether the entry applies to egress traffic. // - // entry.icmp.code - The ICMP code specified in the entry, if any. + // entry.icmp.code - The ICMP code specified in the entry, if any. // - // entry.icmp.type - The ICMP type specified in the entry, if any. + // entry.icmp.type - The ICMP type specified in the entry, if any. // - // entry.port-range.from - The start of the port range specified in the entry. + // entry.port-range.from - The start of the port range specified in the + // entry. // - // entry.port-range.to - The end of the port range specified in the entry. + // entry.port-range.to - The end of the port range specified in the entry. // - // entry.protocol - The protocol specified in the entry (tcp | udp | icmp + // entry.protocol - The protocol specified in the entry (tcp | udp | icmp // or a protocol number). // - // entry.rule-action - Allows or denies the matching traffic (allow | deny). + // entry.rule-action - Allows or denies the matching traffic (allow | deny). // - // entry.rule-number - The number of an entry (in other words, rule) in the - // ACL's set of entries. + // entry.rule-number - The number of an entry (in other words, rule) in + // the ACL's set of entries. // - // network-acl-id - The ID of the network ACL. + // network-acl-id - The ID of the network ACL. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-id - The ID of the VPC for the network ACL. + // vpc-id - The ID of the VPC for the network ACL. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more network ACL IDs. @@ -14354,106 +19110,106 @@ type DescribeNetworkInterfacesInput struct { // One or more filters. // - // addresses.private-ip-address - The private IP addresses associated with + // addresses.private-ip-address - The private IP addresses associated with // the network interface. 
// - // addresses.primary - Whether the private IP address is the primary IP address - // associated with the network interface. + // addresses.primary - Whether the private IP address is the primary IP + // address associated with the network interface. // - // addresses.association.public-ip - The association ID returned when the + // addresses.association.public-ip - The association ID returned when the // network interface was associated with the Elastic IP address. // - // addresses.association.owner-id - The owner ID of the addresses associated + // addresses.association.owner-id - The owner ID of the addresses associated // with the network interface. // - // association.association-id - The association ID returned when the network + // association.association-id - The association ID returned when the network // interface was associated with an IP address. // - // association.allocation-id - The allocation ID returned when you allocated + // association.allocation-id - The allocation ID returned when you allocated // the Elastic IP address for your network interface. // - // association.ip-owner-id - The owner of the Elastic IP address associated + // association.ip-owner-id - The owner of the Elastic IP address associated // with the network interface. // - // association.public-ip - The address of the Elastic IP address bound to + // association.public-ip - The address of the Elastic IP address bound to // the network interface. // - // association.public-dns-name - The public DNS name for the network interface. + // association.public-dns-name - The public DNS name for the network interface. // - // attachment.attachment-id - The ID of the interface attachment. + // attachment.attachment-id - The ID of the interface attachment. // - // attachment.attach.time - The time that the network interface was attached + // attachment.attach.time - The time that the network interface was attached // to an instance. // - // attachment.delete-on-termination - Indicates whether the attachment is + // attachment.delete-on-termination - Indicates whether the attachment is // deleted when an instance is terminated. // - // attachment.device-index - The device index to which the network interface + // attachment.device-index - The device index to which the network interface // is attached. // - // attachment.instance-id - The ID of the instance to which the network interface - // is attached. - // - // attachment.instance-owner-id - The owner ID of the instance to which the - // network interface is attached. - // - // attachment.nat-gateway-id - The ID of the NAT gateway to which the network + // attachment.instance-id - The ID of the instance to which the network // interface is attached. // - // attachment.status - The status of the attachment (attaching | attached + // attachment.instance-owner-id - The owner ID of the instance to which + // the network interface is attached. + // + // attachment.nat-gateway-id - The ID of the NAT gateway to which the network + // interface is attached. + // + // attachment.status - The status of the attachment (attaching | attached // | detaching | detached). // - // availability-zone - The Availability Zone of the network interface. + // availability-zone - The Availability Zone of the network interface. // - // description - The description of the network interface. + // description - The description of the network interface. // - // group-id - The ID of a security group associated with the network interface. 
+ // group-id - The ID of a security group associated with the network interface. // - // group-name - The name of a security group associated with the network + // group-name - The name of a security group associated with the network // interface. // - // mac-address - The MAC address of the network interface. + // mac-address - The MAC address of the network interface. // - // network-interface-id - The ID of the network interface. + // network-interface-id - The ID of the network interface. // - // owner-id - The AWS account ID of the network interface owner. + // owner-id - The AWS account ID of the network interface owner. // - // private-ip-address - The private IP address or addresses of the network + // private-ip-address - The private IP address or addresses of the network // interface. // - // private-dns-name - The private DNS name of the network interface. + // private-dns-name - The private DNS name of the network interface. // - // requester-id - The ID of the entity that launched the instance on your + // requester-id - The ID of the entity that launched the instance on your // behalf (for example, AWS Management Console, Auto Scaling, and so on). // - // requester-managed - Indicates whether the network interface is being managed - // by an AWS service (for example, AWS Management Console, Auto Scaling, and - // so on). + // requester-managed - Indicates whether the network interface is being + // managed by an AWS service (for example, AWS Management Console, Auto Scaling, + // and so on). // - // source-desk-check - Indicates whether the network interface performs source/destination - // checking. A value of true means checking is enabled, and false means checking - // is disabled. The value must be false for the network interface to perform - // network address translation (NAT) in your VPC. + // source-desk-check - Indicates whether the network interface performs + // source/destination checking. A value of true means checking is enabled, and + // false means checking is disabled. The value must be false for the network + // interface to perform network address translation (NAT) in your VPC. // - // status - The status of the network interface. If the network interface + // status - The status of the network interface. If the network interface // is not attached to an instance, the status is available; if a network interface // is attached to an instance the status is in-use. // - // subnet-id - The ID of the subnet for the network interface. + // subnet-id - The ID of the subnet for the network interface. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. 
// - // vpc-id - The ID of the VPC for the network interface. + // vpc-id - The ID of the VPC for the network interface. Filters []*Filter `locationName:"filter" locationNameList:"Filter" type:"list"` // One or more network interface IDs. @@ -14502,12 +19258,12 @@ type DescribePlacementGroupsInput struct { // One or more filters. // - // group-name - The name of the placement group. + // group-name - The name of the placement group. // - // state - The state of the placement group (pending | available | deleting + // state - The state of the placement group (pending | available | deleting // | deleted). // - // strategy - The strategy of the placement group (cluster). + // strategy - The strategy of the placement group (cluster). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more placement group names. @@ -14556,9 +19312,9 @@ type DescribePrefixListsInput struct { // One or more filters. // - // prefix-list-id: The ID of a prefix list. + // prefix-list-id: The ID of a prefix list. // - // prefix-list-name: The name of a prefix list. + // prefix-list-name: The name of a prefix list. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of items to return for this request. The request returns @@ -14621,9 +19377,9 @@ type DescribeRegionsInput struct { // One or more filters. // - // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). + // endpoint - The endpoint of the region (for example, ec2.us-east-1.amazonaws.com). // - // region-name - The name of the region (for example, us-east-1). + // region-name - The name of the region (for example, us-east-1). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The names of one or more regions. @@ -14670,20 +19426,20 @@ type DescribeReservedInstancesInput struct { // One or more filters. // - // availability-zone - The Availability Zone where the Reserved Instance + // availability-zone - The Availability Zone where the Reserved Instance // can be used. // - // duration - The duration of the Reserved Instance (one year or three years), + // duration - The duration of the Reserved Instance (one year or three years), // in seconds (31536000 | 94608000). // - // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). // - // fixed-price - The purchase price of the Reserved Instance (for example, + // fixed-price - The purchase price of the Reserved Instance (for example, // 9800.0). // - // instance-type - The instance type that is covered by the reservation. + // instance-type - The instance type that is covered by the reservation. // - // product-description - The Reserved Instance product platform description. + // product-description - The Reserved Instance product platform description. // Instances that include (Amazon VPC) in the product platform description will // only be displayed to EC2-Classic account holders and are for use with Amazon // VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon @@ -14693,27 +19449,27 @@ type DescribeReservedInstancesInput struct { // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows // with SQL Server Enterprise (Amazon VPC)). // - // reserved-instances-id - The ID of the Reserved Instance. + // reserved-instances-id - The ID of the Reserved Instance. 
// - // start - The time at which the Reserved Instance purchase request was placed - // (for example, 2014-08-07T11:54:42.000Z). + // start - The time at which the Reserved Instance purchase request was + // placed (for example, 2014-08-07T11:54:42.000Z). // - // state - The state of the Reserved Instance (payment-pending | active | - // payment-failed | retired). + // state - The state of the Reserved Instance (payment-pending | active + // | payment-failed | retired). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // usage-price - The usage price of the Reserved Instance, per hour (for + // usage-price - The usage price of the Reserved Instance, per hour (for // example, 0.84). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -14745,14 +19501,14 @@ type DescribeReservedInstancesListingsInput struct { // One or more filters. // - // reserved-instances-id - The ID of the Reserved Instances. + // reserved-instances-id - The ID of the Reserved Instances. // - // reserved-instances-listing-id - The ID of the Reserved Instances listing. + // reserved-instances-listing-id - The ID of the Reserved Instances listing. // - // status - The status of the Reserved Instance listing (pending | active + // status - The status of the Reserved Instance listing (pending | active // | cancelled | closed). // - // status-message - The reason for the status. + // status-message - The reason for the status. Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"` // One or more Reserved Instance IDs. @@ -14796,38 +19552,38 @@ type DescribeReservedInstancesModificationsInput struct { // One or more filters. // - // client-token - The idempotency token for the modification request. + // client-token - The idempotency token for the modification request. // - // create-date - The time when the modification request was created. + // create-date - The time when the modification request was created. // - // effective-date - The time when the modification becomes effective. + // effective-date - The time when the modification becomes effective. // - // modification-result.reserved-instances-id - The ID for the Reserved Instances + // modification-result.reserved-instances-id - The ID for the Reserved Instances // created as part of the modification request. This ID is only available when // the status of the modification is fulfilled. // - // modification-result.target-configuration.availability-zone - The Availability + // modification-result.target-configuration.availability-zone - The Availability // Zone for the new Reserved Instances. 
// - // modification-result.target-configuration.instance-count - The number + // modification-result.target-configuration.instance-count - The number // of new Reserved Instances. // - // modification-result.target-configuration.instance-type - The instance + // modification-result.target-configuration.instance-type - The instance // type of the new Reserved Instances. // - // modification-result.target-configuration.platform - The network platform + // modification-result.target-configuration.platform - The network platform // of the new Reserved Instances (EC2-Classic | EC2-VPC). // - // reserved-instances-id - The ID of the Reserved Instances modified. + // reserved-instances-id - The ID of the Reserved Instances modified. // - // reserved-instances-modification-id - The ID of the modification request. + // reserved-instances-modification-id - The ID of the modification request. // - // status - The status of the Reserved Instances modification request (processing + // status - The status of the Reserved Instances modification request (processing // | fulfilled | failed). // - // status-message - The reason for the status. + // status-message - The reason for the status. // - // update-date - The time when the modification request was last updated. + // update-date - The time when the modification request was last updated. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The token to retrieve the next page of results. @@ -14884,22 +19640,22 @@ type DescribeReservedInstancesOfferingsInput struct { // One or more filters. // - // availability-zone - The Availability Zone where the Reserved Instance + // availability-zone - The Availability Zone where the Reserved Instance // can be used. // - // duration - The duration of the Reserved Instance (for example, one year + // duration - The duration of the Reserved Instance (for example, one year // or three years), in seconds (31536000 | 94608000). // - // fixed-price - The purchase price of the Reserved Instance (for example, + // fixed-price - The purchase price of the Reserved Instance (for example, // 9800.0). // - // instance-type - The instance type that is covered by the reservation. + // instance-type - The instance type that is covered by the reservation. // - // marketplace - Set to true to show only Reserved Instance Marketplace offerings. - // When this filter is not used, which is the default behavior, all offerings - // from both AWS and the Reserved Instance Marketplace are listed. + // marketplace - Set to true to show only Reserved Instance Marketplace + // offerings. When this filter is not used, which is the default behavior, all + // offerings from both AWS and the Reserved Instance Marketplace are listed. // - // product-description - The Reserved Instance product platform description. + // product-description - The Reserved Instance product platform description. // Instances that include (Amazon VPC) in the product platform description will // only be displayed to EC2-Classic account holders and are for use with Amazon // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon @@ -14909,9 +19665,9 @@ type DescribeReservedInstancesOfferingsInput struct { // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows // with SQL Server Enterprise (Amazon VPC)) // - // reserved-instances-offering-id - The Reserved Instances offering ID. + // reserved-instances-offering-id - The Reserved Instances offering ID. 
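The Reserved Instances offering filters above (duration, instance-type, product-description) translate directly into a DescribeReservedInstancesOfferings call. A sketch with placeholder filter values, assuming the same client setup as the earlier examples.

// findOfferings lists one-year Linux/UNIX offerings for a placeholder
// instance type, using the filters documented above.
func findOfferings(svc *ec2.EC2) error {
	out, err := svc.DescribeReservedInstancesOfferings(&ec2.DescribeReservedInstancesOfferingsInput{
		Filters: []*ec2.Filter{
			{Name: aws.String("instance-type"), Values: []*string{aws.String("m3.medium")}},
			{Name: aws.String("duration"), Values: []*string{aws.String("31536000")}}, // one year, in seconds
			{Name: aws.String("product-description"), Values: []*string{aws.String("Linux/UNIX")}},
		},
	})
	if err != nil {
		return err
	}
	for _, o := range out.ReservedInstancesOfferings {
		fmt.Println(aws.StringValue(o.ReservedInstancesOfferingId))
	}
	return nil
}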
// - // usage-price - The usage price of the Reserved Instance, per hour (for + // usage-price - The usage price of the Reserved Instance, per hour (for // example, 0.84). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -15030,59 +19786,59 @@ type DescribeRouteTablesInput struct { // One or more filters. // - // association.route-table-association-id - The ID of an association ID for - // the route table. + // association.route-table-association-id - The ID of an association ID + // for the route table. // - // association.route-table-id - The ID of the route table involved in the + // association.route-table-id - The ID of the route table involved in the // association. // - // association.subnet-id - The ID of the subnet involved in the association. + // association.subnet-id - The ID of the subnet involved in the association. // - // association.main - Indicates whether the route table is the main route + // association.main - Indicates whether the route table is the main route // table for the VPC (true | false). // - // route-table-id - The ID of the route table. + // route-table-id - The ID of the route table. // - // route.destination-cidr-block - The CIDR range specified in a route in + // route.destination-cidr-block - The CIDR range specified in a route in // the table. // - // route.destination-prefix-list-id - The ID (prefix) of the AWS service + // route.destination-prefix-list-id - The ID (prefix) of the AWS service // specified in a route in the table. // - // route.gateway-id - The ID of a gateway specified in a route in the table. + // route.gateway-id - The ID of a gateway specified in a route in the table. // - // route.instance-id - The ID of an instance specified in a route in the + // route.instance-id - The ID of an instance specified in a route in the // table. // - // route.nat-gateway-id - The ID of a NAT gateway. + // route.nat-gateway-id - The ID of a NAT gateway. // - // route.origin - Describes how the route was created. CreateRouteTable indicates - // that the route was automatically created when the route table was created; - // CreateRoute indicates that the route was manually added to the route table; - // EnableVgwRoutePropagation indicates that the route was propagated by route - // propagation. + // route.origin - Describes how the route was created. CreateRouteTable + // indicates that the route was automatically created when the route table was + // created; CreateRoute indicates that the route was manually added to the route + // table; EnableVgwRoutePropagation indicates that the route was propagated + // by route propagation. // - // route.state - The state of a route in the route table (active | blackhole). + // route.state - The state of a route in the route table (active | blackhole). // The blackhole state indicates that the route's target isn't available (for // example, the specified gateway isn't attached to the VPC, the specified NAT // instance has been terminated, and so on). // - // route.vpc-peering-connection-id - The ID of a VPC peering connection specified - // in a route in the table. + // route.vpc-peering-connection-id - The ID of a VPC peering connection + // specified in a route in the table. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. 
This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-id - The ID of the VPC for the route table. + // vpc-id - The ID of the VPC for the route table. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more route table IDs. @@ -15131,13 +19887,13 @@ type DescribeScheduledInstanceAvailabilityInput struct { // One or more filters. // - // availability-zone - The Availability Zone (for example, us-west-2a). + // availability-zone - The Availability Zone (for example, us-west-2a). // - // instance-type - The instance type (for example, c4.large). + // instance-type - The instance type (for example, c4.large). // - // network-platform - The network platform (EC2-Classic or EC2-VPC). + // network-platform - The network platform (EC2-Classic or EC2-VPC). // - // platform - The platform (Linux/UNIX or Windows). + // platform - The platform (Linux/UNIX or Windows). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The time period for the first schedule to start. @@ -15230,13 +19986,13 @@ type DescribeScheduledInstancesInput struct { // One or more filters. // - // availability-zone - The Availability Zone (for example, us-west-2a). + // availability-zone - The Availability Zone (for example, us-west-2a). // - // instance-type - The instance type (for example, c4.large). + // instance-type - The instance type (for example, c4.large). // - // network-platform - The network platform (EC2-Classic or EC2-VPC). + // network-platform - The network platform (EC2-Classic or EC2-VPC). // - // platform - The platform (Linux/UNIX or Windows). + // platform - The platform (Linux/UNIX or Windows). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return in a single call. This value can @@ -15286,6 +20042,59 @@ func (s DescribeScheduledInstancesOutput) GoString() string { return s.String() } +type DescribeSecurityGroupReferencesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more security group IDs in your account. + GroupId []*string `locationNameList:"item" type:"list" required:"true"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeSecurityGroupReferencesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeSecurityGroupReferencesInput"} + if s.GroupId == nil { + invalidParams.Add(request.NewErrParamRequired("GroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeSecurityGroupReferencesOutput struct { + _ struct{} `type:"structure"` + + // Information about the VPCs with the referencing security groups. + SecurityGroupReferenceSet []*SecurityGroupReference `locationName:"securityGroupReferenceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeSecurityGroupReferencesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeSecurityGroupReferencesOutput) GoString() string { + return s.String() +} + // Contains the parameters for DescribeSecurityGroups. type DescribeSecurityGroupsInput struct { _ struct{} `type:"structure"` @@ -15300,42 +20109,42 @@ type DescribeSecurityGroupsInput struct { // security groups for which any combination of rules - not necessarily a single // rule - match all filters. // - // description - The description of the security group. + // description - The description of the security group. // - // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service + // egress.ip-permission.prefix-list-id - The ID (prefix) of the AWS service // to which the security group allows access. // - // group-id - The ID of the security group. + // group-id - The ID of the security group. // - // group-name - The name of the security group. + // group-name - The name of the security group. // - // ip-permission.cidr - A CIDR range that has been granted permission. + // ip-permission.cidr - A CIDR range that has been granted permission. // - // ip-permission.from-port - The start of port range for the TCP and UDP + // ip-permission.from-port - The start of port range for the TCP and UDP // protocols, or an ICMP type number. // - // ip-permission.group-id - The ID of a security group that has been granted + // ip-permission.group-id - The ID of a security group that has been granted // permission. // - // ip-permission.group-name - The name of a security group that has been + // ip-permission.group-name - The name of a security group that has been // granted permission. // - // ip-permission.protocol - The IP protocol for the permission (tcp | udp + // ip-permission.protocol - The IP protocol for the permission (tcp | udp // | icmp or a protocol number). // - // ip-permission.to-port - The end of port range for the TCP and UDP protocols, + // ip-permission.to-port - The end of port range for the TCP and UDP protocols, // or an ICMP code. // - // ip-permission.user-id - The ID of an AWS account that has been granted + // ip-permission.user-id - The ID of an AWS account that has been granted // permission. // - // owner-id - The AWS account ID of the owner of the security group. + // owner-id - The AWS account ID of the owner of the security group. // - // tag-key - The key of a tag assigned to the security group. + // tag-key - The key of a tag assigned to the security group. // - // tag-value - The value of a tag assigned to the security group. + // tag-value - The value of a tag assigned to the security group. // - // vpc-id - The ID of the VPC specified when the security group was created. + // vpc-id - The ID of the VPC specified when the security group was created. 
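The DescribeSecurityGroupReferences input/output types added above take only a list of group IDs, with GroupId as the sole required field. A sketch of calling the new operation; the group ID is a placeholder, and each result element is printed via its generated String method rather than assuming field names.

// describeReferences calls the DescribeSecurityGroupReferences operation
// whose types are added above, under the same client-setup assumption as the
// earlier sketches.
func describeReferences(svc *ec2.EC2) error {
	out, err := svc.DescribeSecurityGroupReferences(&ec2.DescribeSecurityGroupReferencesInput{
		// GroupId is the only required field, per the Validate method above.
		GroupId: []*string{aws.String("sg-12345678")},
	})
	if err != nil {
		return err
	}
	for _, ref := range out.SecurityGroupReferenceSet {
		fmt.Println(ref) // generated String method prettifies the struct
	}
	return nil
}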
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more security group IDs. Required for security groups in a nondefault @@ -15460,37 +20269,37 @@ type DescribeSnapshotsInput struct { // One or more filters. // - // description - A description of the snapshot. + // description - A description of the snapshot. // - // owner-alias - The AWS account alias (for example, amazon) that owns the + // owner-alias - The AWS account alias (for example, amazon) that owns the // snapshot. // - // owner-id - The ID of the AWS account that owns the snapshot. + // owner-id - The ID of the AWS account that owns the snapshot. // - // progress - The progress of the snapshot, as a percentage (for example, + // progress - The progress of the snapshot, as a percentage (for example, // 80%). // - // snapshot-id - The snapshot ID. + // snapshot-id - The snapshot ID. // - // start-time - The time stamp when the snapshot was initiated. + // start-time - The time stamp when the snapshot was initiated. // - // status - The status of the snapshot (pending | completed | error). + // status - The status of the snapshot (pending | completed | error). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // volume-id - The ID of the volume the snapshot is for. + // volume-id - The ID of the volume the snapshot is for. // - // volume-size - The size of the volume, in GiB. + // volume-size - The size of the volume, in GiB. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of snapshot results returned by DescribeSnapshots in paginated @@ -15822,113 +20631,113 @@ type DescribeSpotInstanceRequestsInput struct { // One or more filters. // - // availability-zone-group - The Availability Zone group. + // availability-zone-group - The Availability Zone group. // - // create-time - The time stamp when the Spot instance request was created. + // create-time - The time stamp when the Spot instance request was created. // - // fault-code - The fault code related to the request. + // fault-code - The fault code related to the request. // - // fault-message - The fault message related to the request. + // fault-message - The fault message related to the request. // - // instance-id - The ID of the instance that fulfilled the request. + // instance-id - The ID of the instance that fulfilled the request. // - // launch-group - The Spot instance launch group. + // launch-group - The Spot instance launch group. 
// - // launch.block-device-mapping.delete-on-termination - Indicates whether + // launch.block-device-mapping.delete-on-termination - Indicates whether // the Amazon EBS volume is deleted on instance termination. // - // launch.block-device-mapping.device-name - The device name for the Amazon + // launch.block-device-mapping.device-name - The device name for the Amazon // EBS volume (for example, /dev/sdh). // - // launch.block-device-mapping.snapshot-id - The ID of the snapshot used + // launch.block-device-mapping.snapshot-id - The ID of the snapshot used // for the Amazon EBS volume. // - // launch.block-device-mapping.volume-size - The size of the Amazon EBS volume, - // in GiB. + // launch.block-device-mapping.volume-size - The size of the Amazon EBS + // volume, in GiB. // - // launch.block-device-mapping.volume-type - The type of the Amazon EBS volume: - // gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput - // Optimized HDD, sc1for Cold HDD, or standard for Magnetic. + // launch.block-device-mapping.volume-type - The type of the Amazon EBS + // volume: gp2 for General Purpose SSD, io1 for Provisioned IOPS SSD, st1 for + // Throughput Optimized HDD, sc1for Cold HDD, or standard for Magnetic. // - // launch.group-id - The security group for the instance. + // launch.group-id - The security group for the instance. // - // launch.image-id - The ID of the AMI. + // launch.image-id - The ID of the AMI. // - // launch.instance-type - The type of instance (for example, m3.medium). + // launch.instance-type - The type of instance (for example, m3.medium). // - // launch.kernel-id - The kernel ID. + // launch.kernel-id - The kernel ID. // - // launch.key-name - The name of the key pair the instance launched with. + // launch.key-name - The name of the key pair the instance launched with. // - // launch.monitoring-enabled - Whether monitoring is enabled for the Spot + // launch.monitoring-enabled - Whether monitoring is enabled for the Spot // instance. // - // launch.ramdisk-id - The RAM disk ID. + // launch.ramdisk-id - The RAM disk ID. // - // network-interface.network-interface-id - The ID of the network interface. + // network-interface.network-interface-id - The ID of the network interface. // - // network-interface.device-index - The index of the device for the network + // network-interface.device-index - The index of the device for the network // interface attachment on the instance. // - // network-interface.subnet-id - The ID of the subnet for the instance. + // network-interface.subnet-id - The ID of the subnet for the instance. // - // network-interface.description - A description of the network interface. + // network-interface.description - A description of the network interface. // - // network-interface.private-ip-address - The primary private IP address + // network-interface.private-ip-address - The primary private IP address // of the network interface. // - // network-interface.delete-on-termination - Indicates whether the network + // network-interface.delete-on-termination - Indicates whether the network // interface is deleted when the instance is terminated. // - // network-interface.group-id - The ID of the security group associated with - // the network interface. - // - // network-interface.group-name - The name of the security group associated + // network-interface.group-id - The ID of the security group associated // with the network interface. 
// - // network-interface.addresses.primary - Indicates whether the IP address + // network-interface.group-name - The name of the security group associated + // with the network interface. + // + // network-interface.addresses.primary - Indicates whether the IP address // is the primary private IP address. // - // product-description - The product description associated with the instance + // product-description - The product description associated with the instance // (Linux/UNIX | Windows). // - // spot-instance-request-id - The Spot instance request ID. + // spot-instance-request-id - The Spot instance request ID. // - // spot-price - The maximum hourly price for any Spot instance launched to - // fulfill the request. + // spot-price - The maximum hourly price for any Spot instance launched + // to fulfill the request. // - // state - The state of the Spot instance request (open | active | closed + // state - The state of the Spot instance request (open | active | closed // | cancelled | failed). Spot bid status information can help you track your // Amazon EC2 Spot instance requests. For more information, see Spot Bid Status // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html) // in the Amazon Elastic Compute Cloud User Guide. // - // status-code - The short code describing the most recent evaluation of + // status-code - The short code describing the most recent evaluation of // your Spot instance request. // - // status-message - The message explaining the status of the Spot instance + // status-message - The message explaining the status of the Spot instance // request. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // type - The type of Spot instance request (one-time | persistent). + // type - The type of Spot instance request (one-time | persistent). // - // launched-availability-zone - The Availability Zone in which the bid is + // launched-availability-zone - The Availability Zone in which the bid is // launched. // - // valid-from - The start date of the request. + // valid-from - The start date of the request. // - // valid-until - The end date of the request. + // valid-until - The end date of the request. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more Spot instance request IDs. @@ -15982,18 +20791,19 @@ type DescribeSpotPriceHistoryInput struct { // One or more filters. // - // availability-zone - The Availability Zone for which prices should be returned. + // availability-zone - The Availability Zone for which prices should be + // returned. // - // instance-type - The type of instance (for example, m3.medium). 
+ // instance-type - The type of instance (for example, m3.medium). // - // product-description - The product description for the Spot price (Linux/UNIX + // product-description - The product description for the Spot price (Linux/UNIX // | SUSE Linux | Windows | Linux/UNIX (Amazon VPC) | SUSE Linux (Amazon VPC) // | Windows (Amazon VPC)). // - // spot-price - The Spot price. The value must match exactly (or use wildcards; + // spot-price - The Spot price. The value must match exactly (or use wildcards; // greater than or less than comparison is not supported). // - // timestamp - The timestamp of the Spot price history, in UTC format (for + // timestamp - The timestamp of the Spot price history, in UTC format (for // example, YYYY-MM-DDTHH:MM:SSZ). You can use wildcards (* and ?). Greater // than or less than comparison is not supported. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -16049,6 +20859,78 @@ func (s DescribeSpotPriceHistoryOutput) GoString() string { return s.String() } +type DescribeStaleSecurityGroupsInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the operation, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `min:"1" type:"string"` + + // The ID of the VPC. + VpcId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStaleSecurityGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStaleSecurityGroupsInput"} + if s.MaxResults != nil && *s.MaxResults < 5 { + invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5)) + } + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) + } + if s.VpcId == nil { + invalidParams.Add(request.NewErrParamRequired("VpcId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeStaleSecurityGroupsOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the stale security groups. + StaleSecurityGroupSet []*StaleSecurityGroup `locationName:"staleSecurityGroupSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeStaleSecurityGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeStaleSecurityGroupsOutput) GoString() string { + return s.String() +} + // Contains the parameters for DescribeSubnets. 
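The DescribeStaleSecurityGroups types added above are paginated (a MaxResults of at least 5, an optional NextToken) and require a VPC ID. As an illustrative sketch accompanying the patch rather than part of it, this is roughly how the corresponding client call introduced by this SDK update might be driven; the session setup and the VPC ID are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Assumes default credentials and region from the environment.
	svc := ec2.New(session.New())

	input := &ec2.DescribeStaleSecurityGroupsInput{
		VpcId:      aws.String("vpc-12345678"), // placeholder VPC ID
		MaxResults: aws.Int64(5),               // minimum accepted by Validate()
	}
	for {
		out, err := svc.DescribeStaleSecurityGroups(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, sg := range out.StaleSecurityGroupSet {
			fmt.Printf("stale group %s (%d stale ingress rules)\n",
				aws.StringValue(sg.GroupId), len(sg.StaleIpPermissions))
		}
		// Per the output docs above, an empty NextToken means no more items.
		if aws.StringValue(out.NextToken) == "" {
			break
		}
		input.NextToken = out.NextToken
	}
}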
type DescribeSubnetsInput struct { _ struct{} `type:"structure"` @@ -16061,36 +20943,36 @@ type DescribeSubnetsInput struct { // One or more filters. // - // availabilityZone - The Availability Zone for the subnet. You can also + // availabilityZone - The Availability Zone for the subnet. You can also // use availability-zone as the filter name. // - // available-ip-address-count - The number of IP addresses in the subnet + // available-ip-address-count - The number of IP addresses in the subnet // that are available. // - // cidrBlock - The CIDR block of the subnet. The CIDR block you specify must - // exactly match the subnet's CIDR block for information to be returned for - // the subnet. You can also use cidr or cidr-block as the filter names. + // cidrBlock - The CIDR block of the subnet. The CIDR block you specify + // must exactly match the subnet's CIDR block for information to be returned + // for the subnet. You can also use cidr or cidr-block as the filter names. // - // defaultForAz - Indicates whether this is the default subnet for the Availability + // defaultForAz - Indicates whether this is the default subnet for the Availability // Zone. You can also use default-for-az as the filter name. // - // state - The state of the subnet (pending | available). + // state - The state of the subnet (pending | available). // - // subnet-id - The ID of the subnet. + // subnet-id - The ID of the subnet. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-id - The ID of the VPC for the subnet. + // vpc-id - The ID of the VPC for the subnet. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more subnet IDs. @@ -16139,16 +21021,16 @@ type DescribeTagsInput struct { // One or more filters. // - // key - The tag key. + // key - The tag key. // - // resource-id - The resource ID. + // resource-id - The resource ID. // - // resource-type - The resource type (customer-gateway | dhcp-options | image - // | instance | internet-gateway | network-acl | network-interface | reserved-instances + // resource-type - The resource type (customer-gateway | dhcp-options | + // image | instance | internet-gateway | network-acl | network-interface | reserved-instances // | route-table | security-group | snapshot | spot-instances-request | subnet // | volume | vpc | vpn-connection | vpn-gateway). // - // value - The tag value. + // value - The tag value. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // The maximum number of results to return in a single call. 
This value can @@ -16268,34 +21150,34 @@ type DescribeVolumeStatusInput struct { // One or more filters. // - // action.code - The action code for the event (for example, enable-volume-io). + // action.code - The action code for the event (for example, enable-volume-io). // - // action.description - A description of the action. + // action.description - A description of the action. // - // action.event-id - The event ID associated with the action. + // action.event-id - The event ID associated with the action. // - // availability-zone - The Availability Zone of the instance. + // availability-zone - The Availability Zone of the instance. // - // event.description - A description of the event. + // event.description - A description of the event. // - // event.event-id - The event ID. + // event.event-id - The event ID. // - // event.event-type - The event type (for io-enabled: passed | failed; for + // event.event-type - The event type (for io-enabled: passed | failed; for // io-performance: io-performance:degraded | io-performance:severely-degraded // | io-performance:stalled). // - // event.not-after - The latest end time for the event. + // event.not-after - The latest end time for the event. // - // event.not-before - The earliest start time for the event. + // event.not-before - The earliest start time for the event. // - // volume-status.details-name - The cause for volume-status.status (io-enabled + // volume-status.details-name - The cause for volume-status.status (io-enabled // | io-performance). // - // volume-status.details-status - The status of volume-status.details-name + // volume-status.details-status - The status of volume-status.details-name // (for io-enabled: passed | failed; for io-performance: normal | degraded | // severely-degraded | stalled). // - // volume-status.status - The status of the volume (ok | impaired | warning + // volume-status.status - The status of the volume (ok | impaired | warning // | insufficient-data). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -16365,48 +21247,48 @@ type DescribeVolumesInput struct { // One or more filters. // - // attachment.attach-time - The time stamp when the attachment initiated. + // attachment.attach-time - The time stamp when the attachment initiated. // - // attachment.delete-on-termination - Whether the volume is deleted on instance + // attachment.delete-on-termination - Whether the volume is deleted on instance // termination. // - // attachment.device - The device name that is exposed to the instance (for + // attachment.device - The device name that is exposed to the instance (for // example, /dev/sda1). // - // attachment.instance-id - The ID of the instance the volume is attached + // attachment.instance-id - The ID of the instance the volume is attached // to. // - // attachment.status - The attachment state (attaching | attached | detaching + // attachment.status - The attachment state (attaching | attached | detaching // | detached). // - // availability-zone - The Availability Zone in which the volume was created. + // availability-zone - The Availability Zone in which the volume was created. // - // create-time - The time stamp when the volume was created. + // create-time - The time stamp when the volume was created. // - // encrypted - The encryption status of the volume. + // encrypted - The encryption status of the volume. // - // size - The size of the volume, in GiB. + // size - The size of the volume, in GiB. 
// - // snapshot-id - The snapshot from which the volume was created. + // snapshot-id - The snapshot from which the volume was created. // - // status - The status of the volume (creating | available | in-use | deleting + // status - The status of the volume (creating | available | in-use | deleting // | deleted | error). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // volume-id - The volume ID. + // volume-id - The volume ID. // - // volume-type - The Amazon EBS volume type. This can be gp2 for General + // volume-type - The Amazon EBS volume type. This can be gp2 for General // Purpose SSD, io1 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, // sc1 for Cold HDD, or standard for Magnetic volumes. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -16613,20 +21495,20 @@ type DescribeVpcClassicLinkInput struct { // One or more filters. // - // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true - // | false). + // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink + // (true | false). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPCs for which you want to describe the ClassicLink status. @@ -16727,13 +21609,13 @@ type DescribeVpcEndpointsInput struct { // One or more filters. // - // service-name: The name of the AWS service. + // service-name: The name of the AWS service. // - // vpc-id: The ID of the VPC in which the endpoint resides. + // vpc-id: The ID of the VPC in which the endpoint resides. // - // vpc-endpoint-id: The ID of the endpoint. + // vpc-endpoint-id: The ID of the endpoint. 
// - // vpc-endpoint-state: The state of the endpoint. (pending | available | + // vpc-endpoint-state: The state of the endpoint. (pending | available | // deleting | deleted) Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` @@ -16796,41 +21678,41 @@ type DescribeVpcPeeringConnectionsInput struct { // One or more filters. // - // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. + // accepter-vpc-info.cidr-block - The CIDR block of the peer VPC. // - // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer + // accepter-vpc-info.owner-id - The AWS account ID of the owner of the peer // VPC. // - // accepter-vpc-info.vpc-id - The ID of the peer VPC. + // accepter-vpc-info.vpc-id - The ID of the peer VPC. // - // expiration-time - The expiration date and time for the VPC peering connection. + // expiration-time - The expiration date and time for the VPC peering connection. // - // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. + // requester-vpc-info.cidr-block - The CIDR block of the requester's VPC. // - // requester-vpc-info.owner-id - The AWS account ID of the owner of the requester - // VPC. + // requester-vpc-info.owner-id - The AWS account ID of the owner of the + // requester VPC. // - // requester-vpc-info.vpc-id - The ID of the requester VPC. + // requester-vpc-info.vpc-id - The ID of the requester VPC. // - // status-code - The status of the VPC peering connection (pending-acceptance + // status-code - The status of the VPC peering connection (pending-acceptance // | failed | expired | provisioning | active | deleted | rejected). // - // status-message - A message that provides more information about the status + // status-message - A message that provides more information about the status // of the VPC peering connection, if applicable. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-peering-connection-id - The ID of the VPC peering connection. + // vpc-peering-connection-id - The ID of the VPC peering connection. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPC peering connection IDs. @@ -16879,29 +21761,29 @@ type DescribeVpcsInput struct { // One or more filters. // - // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly + // cidr - The CIDR block of the VPC. The CIDR block you specify must exactly // match the VPC's CIDR block for information to be returned for the VPC. Must // contain the slash followed by one or two digits (for example, /28). // - // dhcp-options-id - The ID of a set of DHCP options. 
+ // dhcp-options-id - The ID of a set of DHCP options. // - // isDefault - Indicates whether the VPC is the default VPC. + // isDefault - Indicates whether the VPC is the default VPC. // - // state - The state of the VPC (pending | available). + // state - The state of the VPC (pending | available). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // vpc-id - The ID of the VPC. + // vpc-id - The ID of the VPC. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPC IDs. @@ -16950,44 +21832,44 @@ type DescribeVpnConnectionsInput struct { // One or more filters. // - // customer-gateway-configuration - The configuration information for the + // customer-gateway-configuration - The configuration information for the // customer gateway. // - // customer-gateway-id - The ID of a customer gateway associated with the + // customer-gateway-id - The ID of a customer gateway associated with the // VPN connection. // - // state - The state of the VPN connection (pending | available | deleting + // state - The state of the VPN connection (pending | available | deleting // | deleted). // - // option.static-routes-only - Indicates whether the connection has static + // option.static-routes-only - Indicates whether the connection has static // routes only. Used for devices that do not support Border Gateway Protocol // (BGP). // - // route.destination-cidr-block - The destination CIDR block. This corresponds + // route.destination-cidr-block - The destination CIDR block. This corresponds // to the subnet used in a customer data center. // - // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP + // bgp-asn - The BGP Autonomous System Number (ASN) associated with a BGP // device. // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. 
This filter + // is independent of the tag-key filter. // - // type - The type of VPN connection. Currently the only supported type is - // ipsec.1. + // type - The type of VPN connection. Currently the only supported type + // is ipsec.1. // - // vpn-connection-id - The ID of the VPN connection. + // vpn-connection-id - The ID of the VPN connection. // - // vpn-gateway-id - The ID of a virtual private gateway associated with the - // VPN connection. + // vpn-gateway-id - The ID of a virtual private gateway associated with + // the VPN connection. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more VPN connection IDs. @@ -17036,33 +21918,33 @@ type DescribeVpnGatewaysInput struct { // One or more filters. // - // attachment.state - The current state of the attachment between the gateway + // attachment.state - The current state of the attachment between the gateway // and the VPC (attaching | attached | detaching | detached). // - // attachment.vpc-id - The ID of an attached VPC. + // attachment.vpc-id - The ID of an attached VPC. // - // availability-zone - The Availability Zone for the virtual private gateway + // availability-zone - The Availability Zone for the virtual private gateway // (if applicable). // - // state - The state of the virtual private gateway (pending | available + // state - The state of the virtual private gateway (pending | available // | deleting | deleted). // - // tag:key=value - The key/value combination of a tag assigned to the resource. + // tag:key=value - The key/value combination of a tag assigned to the resource. // - // tag-key - The key of a tag assigned to the resource. This filter is independent + // tag-key - The key of a tag assigned to the resource. This filter is independent // of the tag-value filter. For example, if you use both the filter "tag-key=Purpose" // and the filter "tag-value=X", you get any resources assigned both the tag // key Purpose (regardless of what the tag's value is), and the tag value X // (regardless of what the tag's key is). If you want to list only resources // where Purpose is X, see the tag:key=value filter. // - // tag-value - The value of a tag assigned to the resource. This filter is - // independent of the tag-key filter. + // tag-value - The value of a tag assigned to the resource. This filter + // is independent of the tag-key filter. // - // type - The type of virtual private gateway. Currently the only supported + // type - The type of virtual private gateway. Currently the only supported // type is ipsec.1. // - // vpn-gateway-id - The ID of the virtual private gateway. + // vpn-gateway-id - The ID of the virtual private gateway. Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // One or more virtual private gateway IDs. @@ -17824,8 +22706,8 @@ type EbsBlockDevice struct { // For io1, this represents the number of IOPS that are provisioned for the // volume. For gp2, this represents the baseline performance of the volume and // the rate at which the volume accumulates I/O credits for bursting. For more - // information on General Purpose SSD baseline performance, I/O credits, and - // bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // information about General Purpose SSD baseline performance, I/O credits, + // and bursting, see Amazon EBS Volume Types (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) // in the Amazon Elastic Compute Cloud User Guide. 
// // Constraint: Range is 100-20000 IOPS for io1 volumes and 100-10000 IOPS for @@ -18119,54 +23001,54 @@ type EventInformation struct { // // The following are the error events. // - // iamFleetRoleInvalid - The Spot fleet did not have the required permissions + // iamFleetRoleInvalid - The Spot fleet did not have the required permissions // either to launch or terminate an instance. // - // launchSpecTemporarilyBlacklisted - The configuration is not valid and + // launchSpecTemporarilyBlacklisted - The configuration is not valid and // several attempts to launch instances have failed. For more information, see // the description of the event. // - // spotFleetRequestConfigurationInvalid - The configuration is not valid. + // spotFleetRequestConfigurationInvalid - The configuration is not valid. // For more information, see the description of the event. // - // spotInstanceCountLimitExceeded - You've reached the limit on the number + // spotInstanceCountLimitExceeded - You've reached the limit on the number // of Spot instances that you can launch. // // The following are the fleetRequestChange events. // - // active - The Spot fleet has been validated and Amazon EC2 is attempting + // active - The Spot fleet has been validated and Amazon EC2 is attempting // to maintain the target number of running Spot instances. // - // cancelled - The Spot fleet is canceled and has no running Spot instances. + // cancelled - The Spot fleet is canceled and has no running Spot instances. // The Spot fleet will be deleted two days after its instances were terminated. // - // cancelled_running - The Spot fleet is canceled and will not launch additional + // cancelled_running - The Spot fleet is canceled and will not launch additional // Spot instances, but its existing Spot instances continue to run until they // are interrupted or terminated. // - // cancelled_terminating - The Spot fleet is canceled and its Spot instances + // cancelled_terminating - The Spot fleet is canceled and its Spot instances // are terminating. // - // expired - The Spot fleet request has expired. A subsequent event indicates + // expired - The Spot fleet request has expired. A subsequent event indicates // that the instances were terminated, if the request was created with TerminateInstancesWithExpiration // set. // - // modify_in_progress - A request to modify the Spot fleet request was accepted + // modify_in_progress - A request to modify the Spot fleet request was accepted // and is in progress. // - // modify_successful - The Spot fleet request was modified. + // modify_successful - The Spot fleet request was modified. // - // price_update - The bid price for a launch configuration was adjusted because - // it was too high. This change is permanent. + // price_update - The bid price for a launch configuration was adjusted + // because it was too high. This change is permanent. // - // submitted - The Spot fleet request is being evaluated and Amazon EC2 is - // preparing to launch the target number of Spot instances. + // submitted - The Spot fleet request is being evaluated and Amazon EC2 + // is preparing to launch the target number of Spot instances. // // The following are the instanceChange events. // - // launched - A bid was fulfilled and a new instance was launched. + // launched - A bid was fulfilled and a new instance was launched. // - // terminated - An instance was terminated by the user. + // terminated - An instance was terminated by the user. 
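The reworded EventInformation subtypes above are what DescribeSpotFleetRequestHistory reports back. A hedged sketch of reading them, assuming an already-configured *ec2.EC2 client and a placeholder Spot fleet request ID:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// printFleetHistory lists the last day of Spot fleet events.
func printFleetHistory(svc *ec2.EC2) {
	out, err := svc.DescribeSpotFleetRequestHistory(&ec2.DescribeSpotFleetRequestHistoryInput{
		SpotFleetRequestId: aws.String("sfr-00000000-0000-0000-0000-000000000000"), // placeholder
		StartTime:          aws.Time(time.Now().Add(-24 * time.Hour)),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, rec := range out.HistoryRecords {
		// EventType is error | fleetRequestChange | instanceChange; EventSubType
		// carries the specific values documented above (e.g. cancelled_running).
		fmt.Printf("%s %s/%s instance=%s\n",
			aws.TimeValue(rec.Timestamp).Format(time.RFC3339),
			aws.StringValue(rec.EventType),
			aws.StringValue(rec.EventInformation.EventSubType),
			aws.StringValue(rec.EventInformation.InstanceId))
	}
}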
EventSubType *string `locationName:"eventSubType" type:"string"` // The ID of the instance. This information is available only for instanceChange @@ -18390,8 +23272,8 @@ type GetConsoleOutputOutput struct { // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // The console output, Base64 encoded. If using a command line tool, the tools - // decode the output for you. + // The console output, Base64-encoded. If using a command line tool, the tool + // decodes the output for you. Output *string `locationName:"output" type:"string"` // The time the output was last updated. @@ -18408,6 +23290,68 @@ func (s GetConsoleOutputOutput) GoString() string { return s.String() } +// Contains the parameters for the request. +type GetConsoleScreenshotInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The ID of the instance. + InstanceId *string `type:"string" required:"true"` + + // When set to true, acts as keystroke input and wakes up an instance that's + // in standby or "sleep" mode. + WakeUp *bool `type:"boolean"` +} + +// String returns the string representation +func (s GetConsoleScreenshotInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetConsoleScreenshotInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetConsoleScreenshotInput"} + if s.InstanceId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of the request. +type GetConsoleScreenshotOutput struct { + _ struct{} `type:"structure"` + + // The data that comprises the image. + ImageData *string `locationName:"imageData" type:"string"` + + // The ID of the instance. + InstanceId *string `locationName:"instanceId" type:"string"` +} + +// String returns the string representation +func (s GetConsoleScreenshotOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetConsoleScreenshotOutput) GoString() string { + return s.String() +} + // Contains the parameters for GetPasswordData. type GetPasswordDataInput struct { _ struct{} `type:"structure"` @@ -18499,12 +23443,12 @@ type HistoryRecord struct { // The event type. // - // error - Indicates an error with the Spot fleet request. + // error - Indicates an error with the Spot fleet request. // - // fleetRequestChange - Indicates a change in the status or configuration + // fleetRequestChange - Indicates a change in the status or configuration // of the Spot fleet request. // - // instanceChange - Indicates that an instance was launched or terminated. + // instanceChange - Indicates that an instance was launched or terminated. EventType *string `locationName:"eventType" type:"string" required:"true" enum:"EventType"` // The date and time of the event, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). @@ -18719,6 +23663,9 @@ type Image struct { // The description of the AMI that was provided during image creation. 
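The new GetConsoleScreenshot input and output types above pair with a client method of the same name introduced by this SDK update. A sketch of fetching a screenshot, assuming ImageData is Base64-encoded image bytes (it is returned as a plain string); the instance ID and output filename are placeholders.

package main

import (
	"encoding/base64"
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// saveScreenshot writes an instance's console screenshot to disk.
func saveScreenshot(svc *ec2.EC2) {
	out, err := svc.GetConsoleScreenshot(&ec2.GetConsoleScreenshotInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder
		WakeUp:     aws.Bool(true),                    // nudge an instance in "sleep" mode
	})
	if err != nil {
		log.Fatal(err)
	}
	// ImageData is assumed to be Base64-encoded image bytes.
	img, err := base64.StdEncoding.DecodeString(aws.StringValue(out.ImageData))
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("console.jpg", img, 0644); err != nil {
		log.Fatal(err)
	}
}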
Description *string `locationName:"description" type:"string"` + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + // The hypervisor type of the image. Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` @@ -18767,7 +23714,8 @@ type Image struct { // an instance store volume. RootDeviceType *string `locationName:"rootDeviceType" type:"string" enum:"DeviceType"` - // Specifies whether enhanced networking is enabled. + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` // The current state of the AMI. If the state is available, the image is successfully @@ -18864,7 +23812,7 @@ type ImportImageInput struct { // The license type to be used for the Amazon Machine Image (AMI) after importing. // - // Note: You may only use BYOL if you have existing licenses with rights to + // Note: You may only use BYOL if you have existing licenses with rights to // use these licenses in a third party cloud like AWS. For more information, // see VM Import/Export Prerequisites (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/VMImportPrerequisites.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -19084,7 +24032,9 @@ type ImportInstanceLaunchSpecification struct { // [EC2-VPC] The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The Base64-encoded MIME user data to be made available to the instance. + // The user data to make available to the instance. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. UserData *UserData `locationName:"userData" type:"structure"` } @@ -19464,6 +24414,9 @@ type Instance struct { // Optimized instance. EbsOptimized *bool `locationName:"ebsOptimized" type:"boolean"` + // Specifies whether enhanced networking with ENA is enabled. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + // The hypervisor type of the instance. Hypervisor *string `locationName:"hypervisor" type:"string" enum:"HypervisorType"` @@ -19548,7 +24501,8 @@ type Instance struct { // If the request is a Spot instance request, the ID of the request. SpotInstanceRequestId *string `locationName:"spotInstanceRequestId" type:"string"` - // Specifies whether enhanced networking is enabled. + // Specifies whether enhanced networking with the Intel 82599 Virtual Function + // interface is enabled. SriovNetSupport *string `locationName:"sriovNetSupport" type:"string"` // The current state of the instance. @@ -19947,17 +24901,17 @@ type InstanceState struct { // The low byte represents the state. The high byte is an opaque internal value // and should be ignored. // - // 0 : pending + // 0 : pending // - // 16 : running + // 16 : running // - // 32 : shutting-down + // 32 : shutting-down // - // 48 : terminated + // 48 : terminated // - // 64 : stopping + // 64 : stopping // - // 80 : stopped + // 80 : stopped Code *int64 `locationName:"code" type:"integer"` // The current state of the instance. @@ -20342,7 +25296,9 @@ type LaunchSpecification struct { // The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The Base64-encoded MIME user data to make available to the instances. 
+ // The user data to make available to the instances. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. UserData *string `locationName:"userData" type:"string"` } @@ -20466,6 +25422,64 @@ func (s ModifyIdFormatOutput) GoString() string { return s.String() } +// Contains the parameters of ModifyIdentityIdFormat. +type ModifyIdentityIdFormatInput struct { + _ struct{} `type:"structure"` + + // The ARN of the principal, which can be an IAM user, IAM role, or the root + // user. + PrincipalArn *string `locationName:"principalArn" type:"string" required:"true"` + + // The type of resource. + Resource *string `locationName:"resource" type:"string" required:"true"` + + // Indicates whether the resource should use longer IDs (17-character IDs) + UseLongIds *bool `locationName:"useLongIds" type:"boolean" required:"true"` +} + +// String returns the string representation +func (s ModifyIdentityIdFormatInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdentityIdFormatInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyIdentityIdFormatInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyIdentityIdFormatInput"} + if s.PrincipalArn == nil { + invalidParams.Add(request.NewErrParamRequired("PrincipalArn")) + } + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.UseLongIds == nil { + invalidParams.Add(request.NewErrParamRequired("UseLongIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyIdentityIdFormatOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ModifyIdentityIdFormatOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyIdentityIdFormatOutput) GoString() string { + return s.String() +} + // Contains the parameters for ModifyImageAttribute. type ModifyImageAttributeInput struct { _ struct{} `type:"structure"` @@ -20581,6 +25595,12 @@ type ModifyInstanceAttributeInput struct { // Optimized instance. EbsOptimized *AttributeBooleanValue `locationName:"ebsOptimized" type:"structure"` + // Set to true to enable enhanced networking with ENA for the instance. + // + // This option is supported only for HVM instances. Specifying this option + // with a PV instance can make it unreachable. + EnaSupport *AttributeBooleanValue `locationName:"enaSupport" type:"structure"` + // [EC2-VPC] Changes the security groups of the instance. You must specify at // least one security group, even if it's just the default security group for // the VPC. You must specify the security group ID, not the security group name. @@ -20613,16 +25633,20 @@ type ModifyInstanceAttributeInput struct { // value must be false for a NAT instance to perform NAT. SourceDestCheck *AttributeBooleanValue `type:"structure"` - // Set to simple to enable enhanced networking for the instance. + // Set to simple to enable enhanced networking with the Intel 82599 Virtual + // Function interface for the instance. // - // There is no way to disable enhanced networking at this time. + // There is no way to disable enhanced networking with the Intel 82599 Virtual + // Function interface at this time. 
// // This option is supported only for HVM instances. Specifying this option // with a PV instance can make it unreachable. SriovNetSupport *AttributeValue `locationName:"sriovNetSupport" type:"structure"` - // Changes the instance's user data to the specified base64-encoded value. For - // command line tools, base64 encoding is performed for you. + // Changes the instance's user data to the specified value. If you are using + // an AWS SDK or command line tool, Base64-encoding is performed for you, and + // you can load the text from a file. Otherwise, you must provide Base64-encoded + // text. UserData *BlobAttributeValue `locationName:"userData" type:"structure"` // A new value for the attribute. Use only with the kernel, ramdisk, userData, @@ -21458,21 +26482,22 @@ type NatGateway struct { // If the NAT gateway could not be created, specifies the error message for // the failure, that corresponds to the error code. // - // For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses + // For InsufficientFreeAddressesInSubnet: "Subnet has insufficient free addresses // to create this NAT gateway" // - // For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway attached" + // For Gateway.NotAttached: "Network vpc-xxxxxxxx has no Internet gateway + // attached" // - // For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx + // For InvalidAllocationID.NotFound: "Elastic IP address eipalloc-xxxxxxxx // could not be associated with this NAT gateway" // - // For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx is - // already associated" + // For Resource.AlreadyAssociated: "Elastic IP address eipalloc-xxxxxxxx + // is already associated" // - // For InternalError: "Network interface eni-xxxxxxxx, created and used internally + // For InternalError: "Network interface eni-xxxxxxxx, created and used internally // by this NAT gateway is in an invalid state. Please try again." // - // For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx does + // For InvalidSubnetID.NotFound: "The specified subnet subnet-xxxxxxxx does // not exist or could not be found." FailureMessage *string `locationName:"failureMessage" type:"string"` @@ -21483,7 +26508,28 @@ type NatGateway struct { // The ID of the NAT gateway. NatGatewayId *string `locationName:"natGatewayId" type:"string"` + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + ProvisionedBandwidth *ProvisionedBandwidth `locationName:"provisionedBandwidth" type:"structure"` + // The state of the NAT gateway. + // + // pending: The NAT gateway is being created and is not ready to process + // traffic. + // + // failed: The NAT gateway could not be created. Check the failureCode and + // failureMessage fields for the reason. + // + // available: The NAT gateway is able to process traffic. This status remains + // until you delete the NAT gateway, and does not indicate the health of the + // NAT gateway. + // + // deleting: The NAT gateway is in the process of being terminated and may + // still be processing traffic. + // + // deleted: The NAT gateway has been terminated and is no longer processing + // traffic. State *string `locationName:"state" type:"string" enum:"NatGatewayState"` // The ID of the subnet in which the NAT gateway is located. 
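The EnaSupport attribute added to ModifyInstanceAttributeInput above can be exercised roughly as follows. This is a sketch under the assumptions stated in the surrounding comments: an HVM instance (the docs warn a PV instance can become unreachable) and a placeholder instance ID.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// enableEnhancedNetworking turns on ENA for an HVM instance.
func enableEnhancedNetworking(svc *ec2.EC2) {
	_, err := svc.ModifyInstanceAttribute(&ec2.ModifyInstanceAttributeInput{
		InstanceId: aws.String("i-0123456789abcdef0"), // placeholder
		EnaSupport: &ec2.AttributeBooleanValue{Value: aws.Bool(true)},
	})
	if err != nil {
		log.Fatal(err)
	}
	// For the Intel 82599 Virtual Function interface the equivalent would be
	//   SriovNetSupport: &ec2.AttributeValue{Value: aws.String("simple")}
	// which, per the comments above, cannot be disabled afterwards.
}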
@@ -22176,6 +27222,48 @@ func (s PropagatingVgw) GoString() string { return s.String() } +// Reserved. If you need to sustain traffic greater than the documented limits +// (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), +// contact us through the Support Center (https://console.aws.amazon.com/support/home?). +type ProvisionedBandwidth struct { + _ struct{} `type:"structure"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + ProvisionTime *time.Time `locationName:"provisionTime" type:"timestamp" timestampFormat:"iso8601"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Provisioned *string `locationName:"provisioned" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + RequestTime *time.Time `locationName:"requestTime" type:"timestamp" timestampFormat:"iso8601"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Requested *string `locationName:"requested" type:"string"` + + // Reserved. If you need to sustain traffic greater than the documented limits + // (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html), + // contact us through the Support Center (https://console.aws.amazon.com/support/home?). + Status *string `locationName:"status" type:"string"` +} + +// String returns the string representation +func (s ProvisionedBandwidth) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ProvisionedBandwidth) GoString() string { + return s.String() +} + // Describes a request to purchase Scheduled Instances. type PurchaseRequest struct { _ struct{} `type:"structure"` @@ -22466,6 +27554,13 @@ type RegisterImageInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` + // Set to true to enable enhanced networking with ENA for the AMI and any instances + // that you launch from the AMI. + // + // This option is supported only for HVM AMIs. Specifying this option with + // a PV AMI can make instances launched from the AMI unreachable. + EnaSupport *bool `locationName:"enaSupport" type:"boolean"` + // The full path to your AMI manifest in Amazon S3 storage. ImageLocation *string `type:"string"` @@ -22485,10 +27580,11 @@ type RegisterImageInput struct { // The name of the root device (for example, /dev/sda1, or /dev/xvda). RootDeviceName *string `locationName:"rootDeviceName" type:"string"` - // Set to simple to enable enhanced networking for the AMI and any instances - // that you launch from the AMI. + // Set to simple to enable enhanced networking with the Intel 82599 Virtual + // Function interface for the AMI and any instances that you launch from the + // AMI. 
// - // There is no way to disable enhanced networking at this time. + // There is no way to disable sriovNetSupport at this time. // // This option is supported only for HVM AMIs. Specifying this option with // a PV AMI can make instances launched from the AMI unreachable. @@ -22998,26 +28094,26 @@ type ReportInstanceStatusInput struct { // One or more reason codes that describes the health state of your instance. // - // instance-stuck-in-state: My instance is stuck in a state. + // instance-stuck-in-state: My instance is stuck in a state. // - // unresponsive: My instance is unresponsive. + // unresponsive: My instance is unresponsive. // - // not-accepting-credentials: My instance is not accepting my credentials. + // not-accepting-credentials: My instance is not accepting my credentials. // - // password-not-available: A password is not available for my instance. + // password-not-available: A password is not available for my instance. // - // performance-network: My instance is experiencing performance problems + // performance-network: My instance is experiencing performance problems // which I believe are network related. // - // performance-instance-store: My instance is experiencing performance problems + // performance-instance-store: My instance is experiencing performance problems // which I believe are related to the instance stores. // - // performance-ebs-volume: My instance is experiencing performance problems + // performance-ebs-volume: My instance is experiencing performance problems // which I believe are related to an EBS volume. // - // performance-other: My instance is experiencing performance problems. + // performance-other: My instance is experiencing performance problems. // - // other: [explain using the description parameter] + // other: [explain using the description parameter] ReasonCodes []*string `locationName:"reasonCode" locationNameList:"item" type:"list" required:"true"` // The time at which the reported instance health state began. @@ -23322,7 +28418,9 @@ type RequestSpotLaunchSpecification struct { // The ID of the subnet in which to launch the instance. SubnetId *string `locationName:"subnetId" type:"string"` - // The Base64-encoded MIME user data to make available to the instances. + // The user data to make available to the instances. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. UserData *string `locationName:"userData" type:"string"` } @@ -23760,6 +28858,9 @@ type ResetInstanceAttributeInput struct { _ struct{} `type:"structure"` // The attribute to reset. + // + // You can only reset the following attributes: kernel | ramdisk | sourceDestCheck. + // To change an instance attribute, use ModifyInstanceAttribute. Attribute *string `locationName:"attribute" type:"string" required:"true" enum:"InstanceAttributeName"` // Checks whether you have the required permissions for the action, without @@ -24168,12 +29269,12 @@ type Route struct { // Describes how the route was created. // - // CreateRouteTable - The route was automatically created when the route table - // was created. + // CreateRouteTable - The route was automatically created when the route + // table was created. // - // CreateRoute - The route was manually added to the route table. + // CreateRoute - The route was manually added to the route table. // - // EnableVgwRoutePropagation - The route was propagated by route propagation. 
+ // EnableVgwRoutePropagation - The route was propagated by route propagation. Origin *string `locationName:"origin" type:"string" enum:"RouteOrigin"` // The state of the route. The blackhole state indicates that the route's target @@ -24389,13 +29490,12 @@ type RunInstancesInput struct { // [EC2-VPC] The ID of the subnet to launch the instance into. SubnetId *string `type:"string"` - // Data to configure the instance, or a script to run during instance launch. - // For more information, see Running Commands on Your Linux Instance at Launch - // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) (Linux) - // and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) - // (Windows). For API calls, the text must be base64-encoded. For command line - // tools, the encoding is performed for you, and you can load the text from - // a file. + // The user data to make available to the instance. For more information, see + // Running Commands on Your Linux Instance at Launch (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) + // (Linux) and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). If you are using an AWS SDK or command line tool, Base64-encoding + // is performed for you, and you can load the text from a file. Otherwise, you + // must provide Base64-encoded text. UserData *string `type:"string"` } @@ -24567,14 +29667,13 @@ type S3Storage struct { // The beginning of the file name of the AMI. Prefix *string `locationName:"prefix" type:"string"` - // A base64-encoded Amazon S3 upload policy that gives Amazon EC2 permission - // to upload items into Amazon S3 on your behalf. For command line tools, base64 - // encoding is performed for you. + // An Amazon S3 upload policy that gives Amazon EC2 permission to upload items + // into Amazon S3 on your behalf. // // UploadPolicy is automatically base64 encoded/decoded by the SDK. UploadPolicy []byte `locationName:"uploadPolicy" type:"blob"` - // The signature of the Base64 encoded JSON document. + // The signature of the JSON document. UploadPolicySignature *string `locationName:"uploadPolicySignature" type:"string"` } @@ -25114,6 +30213,30 @@ func (s SecurityGroup) GoString() string { return s.String() } +// Describes a VPC with a security group that references your security group. +type SecurityGroupReference struct { + _ struct{} `type:"structure"` + + // The ID of your security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // The ID of the VPC with the referencing security group. + ReferencingVpcId *string `locationName:"referencingVpcId" type:"string" required:"true"` + + // The ID of the VPC peering connection. + VpcPeeringConnectionId *string `locationName:"vpcPeeringConnectionId" type:"string"` +} + +// String returns the string representation +func (s SecurityGroupReference) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SecurityGroupReference) GoString() string { + return s.String() +} + // Describes the time period for a Scheduled Instance to start its first schedule. // The time period must span less than one day. type SlotDateTimeRangeRequest struct { @@ -25453,7 +30576,9 @@ type SpotFleetLaunchSpecification struct { // subnets, separate them using commas; for example, "subnet-a61dafcf, subnet-65ea5f08". 
SubnetId *string `locationName:"subnetId" type:"string"` - // The Base64-encoded MIME user data to make available to the instances. + // The user data to make available to the instances. If you are using an AWS + // SDK or command line tool, Base64-encoding is performed for you, and you can + // load the text from a file. Otherwise, you must provide Base64-encoded text. UserData *string `locationName:"userData" type:"string"` // The number of units provided by the specified instance type. These are the @@ -25561,6 +30686,10 @@ type SpotFleetRequestConfigData struct { // the Spot fleet. ExcessCapacityTerminationPolicy *string `locationName:"excessCapacityTerminationPolicy" type:"string" enum:"ExcessCapacityTerminationPolicy"` + // The number of units fulfilled by this request compared to the set target + // capacity. + FulfilledCapacity *float64 `locationName:"fulfilledCapacity" type:"double"` + // Grants the Spot fleet permission to terminate Spot instances on your behalf // when you cancel its Spot fleet request using CancelSpotFleetRequests or when // the Spot fleet request expires, if you set terminateInstancesWithExpiration. @@ -25581,6 +30710,16 @@ type SpotFleetRequestConfigData struct { // fleet request expires. TerminateInstancesWithExpiration *bool `locationName:"terminateInstancesWithExpiration" type:"boolean"` + // The type of request. Indicates whether the fleet will only request the target + // capacity or also attempt to maintain it. When you request a certain target + // capacity, the fleet will only place the required bids. It will not attempt + // to replenish Spot instances if capacity is diminished, nor will it submit + // bids in alternative Spot pools if capacity is not available. When you want + // to maintain a certain target capacity, fleet will place the required bids + // to meet this target capacity. It will also automatically replenish any interrupted + // instances. Default: maintain. + Type *string `locationName:"type" type:"string" enum:"FleetType"` + // The start date and time of the request, in UTC format (for example, YYYY-MM-DDTHH:MM:SSZ). // The default is to start fulfilling the request immediately. ValidFrom *time.Time `locationName:"validFrom" type:"timestamp" timestampFormat:"iso8601"` @@ -25821,6 +30960,77 @@ func (s SpotPrice) GoString() string { return s.String() } +// Describes a stale rule in a security group. +type StaleIpPermission struct { + _ struct{} `type:"structure"` + + // The start of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + FromPort *int64 `locationName:"fromPort" type:"integer"` + + // The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers) + // (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml). + IpProtocol *string `locationName:"ipProtocol" type:"string"` + + // One or more IP ranges. Not applicable for stale security group rules. + IpRanges []*string `locationName:"ipRanges" locationNameList:"item" type:"list"` + + // One or more prefix list IDs for an AWS service. Not applicable for stale + // security group rules. + PrefixListIds []*string `locationName:"prefixListIds" locationNameList:"item" type:"list"` + + // The end of the port range for the TCP and UDP protocols, or an ICMP type + // number. A value of -1 indicates all ICMP types. + ToPort *int64 `locationName:"toPort" type:"integer"` + + // One or more security group pairs. 
Returns the ID of the referenced security + // group and VPC, and the ID and status of the VPC peering connection. + UserIdGroupPairs []*UserIdGroupPair `locationName:"groups" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s StaleIpPermission) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleIpPermission) GoString() string { + return s.String() +} + +// Describes a stale security group (a security group that contains stale rules). +type StaleSecurityGroup struct { + _ struct{} `type:"structure"` + + // The description of the security group. + Description *string `locationName:"description" type:"string"` + + // The ID of the security group. + GroupId *string `locationName:"groupId" type:"string" required:"true"` + + // The name of the security group. + GroupName *string `locationName:"groupName" type:"string"` + + // Information about the stale inbound rules in the security group. + StaleIpPermissions []*StaleIpPermission `locationName:"staleIpPermissions" locationNameList:"item" type:"list"` + + // Information about the stale outbound rules in the security group. + StaleIpPermissionsEgress []*StaleIpPermission `locationName:"staleIpPermissionsEgress" locationNameList:"item" type:"list"` + + // The ID of the VPC for the security group. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s StaleSecurityGroup) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StaleSecurityGroup) GoString() string { + return s.String() +} + // Contains the parameters for StartInstances. type StartInstancesInput struct { _ struct{} `type:"structure"` @@ -25888,28 +31098,29 @@ type StateReason struct { // The message for the state change. // - // Server.SpotInstanceTermination: A Spot instance was terminated due to an - // increase in the market price. + // Server.SpotInstanceTermination: A Spot instance was terminated due to + // an increase in the market price. // - // Server.InternalError: An internal error occurred during instance launch, + // Server.InternalError: An internal error occurred during instance launch, // resulting in termination. // - // Server.InsufficientInstanceCapacity: There was insufficient instance capacity - // to satisfy the launch request. + // Server.InsufficientInstanceCapacity: There was insufficient instance + // capacity to satisfy the launch request. // - // Client.InternalError: A client error caused the instance to terminate on - // launch. + // Client.InternalError: A client error caused the instance to terminate + // on launch. // - // Client.InstanceInitiatedShutdown: The instance was shut down using the shutdown - // -h command from the instance. + // Client.InstanceInitiatedShutdown: The instance was shut down using the + // shutdown -h command from the instance. // - // Client.UserInitiatedShutdown: The instance was shut down using the Amazon + // Client.UserInitiatedShutdown: The instance was shut down using the Amazon // EC2 API. // - // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total - // storage was exceeded. Decrease usage or request an increase in your limits. + // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or + // total storage was exceeded. Decrease usage or request an increase in your + // limits. // - // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. 
+ // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. Message *string `locationName:"message" type:"string"` } @@ -26348,11 +31559,13 @@ func (s UserBucketDetails) GoString() string { return s.String() } -// Describes the user data to be made available to an instance. +// Describes the user data for an instance. type UserData struct { _ struct{} `type:"structure"` - // The Base64-encoded MIME user data for the instance. + // The user data. If you are using an AWS SDK or command line tool, Base64-encoding + // is performed for you, and you can load the text from a file. Otherwise, you + // must provide Base64-encoded text. Data *string `locationName:"data" type:"string"` } @@ -26381,7 +31594,8 @@ type UserIdGroupPair struct { // The status of a VPC peering connection, if applicable. PeeringStatus *string `locationName:"peeringStatus" type:"string"` - // The ID of an AWS account. + // The ID of an AWS account. For a referenced security group in another VPC, + // the account ID of the referenced security group is returned. // // [EC2-Classic] Required when adding or removing rules that reference a security // group in another AWS account. @@ -26816,9 +32030,9 @@ func (s VpcEndpoint) GoString() string { type VpcPeeringConnection struct { _ struct{} `type:"structure"` - // Information about the peer VPC. CIDR block information is not returned when - // creating a VPC peering connection, or when describing a VPC peering connection - // that's in the initiating-request or pending-acceptance state. + // Information about the accepter VPC. CIDR block information is not returned + // when creating a VPC peering connection, or when describing a VPC peering + // connection that's in the initiating-request or pending-acceptance state. AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"` // The time that an unaccepted VPC peering connection will expire. 
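As an illustrative aside, not part of the vendored diff: the SpotFleetRequestConfigData changes earlier in this file add a Type field ("request" vs. "maintain") and document that user data passed through the raw API field must be Base64-encoded. Below is a minimal sketch of how a caller might exercise both, assuming the FleetType constants introduced in the next hunk; the IAM role ARN, AMI ID, and bid price are placeholders, and the user data is encoded explicitly for safety.

package main

import (
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New())

	// Encode the instance user data explicitly before passing it through the API field.
	userData := base64.StdEncoding.EncodeToString([]byte("#!/bin/bash\necho hello"))

	input := &ec2.RequestSpotFleetInput{
		SpotFleetRequestConfig: &ec2.SpotFleetRequestConfigData{
			IamFleetRole:   aws.String("arn:aws:iam::123456789012:role/fleet-role"), // placeholder ARN
			SpotPrice:      aws.String("0.05"),                                      // placeholder bid
			TargetCapacity: aws.Int64(2),
			// "maintain" asks the fleet to replenish interrupted instances;
			// "request" only places the initial bids.
			Type: aws.String(ec2.FleetTypeMaintain),
			LaunchSpecifications: []*ec2.SpotFleetLaunchSpecification{{
				ImageId:      aws.String("ami-12345678"), // placeholder AMI
				InstanceType: aws.String(ec2.InstanceTypeT2Micro),
				UserData:     aws.String(userData),
			}},
		},
	}

	resp, err := svc.RequestSpotFleet(input)
	if err != nil {
		fmt.Println("RequestSpotFleet failed:", err)
		return
	}
	fmt.Println("Spot fleet request ID:", aws.StringValue(resp.SpotFleetRequestId))
}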
@@ -27291,6 +32505,13 @@ const ( ExportTaskStateCompleted = "completed" ) +const ( + // @enum FleetType + FleetTypeRequest = "request" + // @enum FleetType + FleetTypeMaintain = "maintain" +) + const ( // @enum FlowLogsResourceType FlowLogsResourceTypeVpc = "VPC" @@ -27389,6 +32610,8 @@ const ( InstanceAttributeNameEbsOptimized = "ebsOptimized" // @enum InstanceAttributeName InstanceAttributeNameSriovNetSupport = "sriovNetSupport" + // @enum InstanceAttributeName + InstanceAttributeNameEnaSupport = "enaSupport" ) const ( @@ -27417,6 +32640,16 @@ const ( // @enum InstanceType InstanceTypeT1Micro = "t1.micro" // @enum InstanceType + InstanceTypeT2Nano = "t2.nano" + // @enum InstanceType + InstanceTypeT2Micro = "t2.micro" + // @enum InstanceType + InstanceTypeT2Small = "t2.small" + // @enum InstanceType + InstanceTypeT2Medium = "t2.medium" + // @enum InstanceType + InstanceTypeT2Large = "t2.large" + // @enum InstanceType InstanceTypeM1Small = "m1.small" // @enum InstanceType InstanceTypeM1Medium = "m1.medium" @@ -27443,16 +32676,6 @@ const ( // @enum InstanceType InstanceTypeM410xlarge = "m4.10xlarge" // @enum InstanceType - InstanceTypeT2Nano = "t2.nano" - // @enum InstanceType - InstanceTypeT2Micro = "t2.micro" - // @enum InstanceType - InstanceTypeT2Small = "t2.small" - // @enum InstanceType - InstanceTypeT2Medium = "t2.medium" - // @enum InstanceType - InstanceTypeT2Large = "t2.large" - // @enum InstanceType InstanceTypeM2Xlarge = "m2.xlarge" // @enum InstanceType InstanceTypeM22xlarge = "m2.2xlarge" @@ -27461,6 +32684,24 @@ const ( // @enum InstanceType InstanceTypeCr18xlarge = "cr1.8xlarge" // @enum InstanceType + InstanceTypeR3Large = "r3.large" + // @enum InstanceType + InstanceTypeR3Xlarge = "r3.xlarge" + // @enum InstanceType + InstanceTypeR32xlarge = "r3.2xlarge" + // @enum InstanceType + InstanceTypeR34xlarge = "r3.4xlarge" + // @enum InstanceType + InstanceTypeR38xlarge = "r3.8xlarge" + // @enum InstanceType + InstanceTypeX14xlarge = "x1.4xlarge" + // @enum InstanceType + InstanceTypeX18xlarge = "x1.8xlarge" + // @enum InstanceType + InstanceTypeX116xlarge = "x1.16xlarge" + // @enum InstanceType + InstanceTypeX132xlarge = "x1.32xlarge" + // @enum InstanceType InstanceTypeI2Xlarge = "i2.xlarge" // @enum InstanceType InstanceTypeI22xlarge = "i2.2xlarge" @@ -27507,16 +32748,6 @@ const ( // @enum InstanceType InstanceTypeCg14xlarge = "cg1.4xlarge" // @enum InstanceType - InstanceTypeR3Large = "r3.large" - // @enum InstanceType - InstanceTypeR3Xlarge = "r3.xlarge" - // @enum InstanceType - InstanceTypeR32xlarge = "r3.2xlarge" - // @enum InstanceType - InstanceTypeR34xlarge = "r3.4xlarge" - // @enum InstanceType - InstanceTypeR38xlarge = "r3.8xlarge" - // @enum InstanceType InstanceTypeD2Xlarge = "d2.xlarge" // @enum InstanceType InstanceTypeD22xlarge = "d2.2xlarge" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go index 9d5fa8160..4e6fa4cd7 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/ec2query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity @@ -54,14 +54,14 @@ func newClient(cfg aws.Config, handlers 
request.Handlers, endpoint, signingRegio ServiceName: ServiceName, SigningRegion: signingRegion, Endpoint: endpoint, - APIVersion: "2015-10-01", + APIVersion: "2016-04-01", }, handlers, ), } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(ec2query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(ec2query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(ec2query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go index e263b0cef..bee4a057f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/waiters.go @@ -784,6 +784,35 @@ func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { return w.Wait() } +func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeVpcs", + Delay: 1, + MaxAttempts: 5, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "status", + Argument: "", + Expected: 200, + }, + { + State: "retry", + Matcher: "error", + Argument: "", + Expected: "InvalidVpcID.NotFound", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error { waiterCfg := waiter.Config{ Operation: "DescribeVpcPeeringConnections", diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index 57a875905..aa162c0be 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -12,7 +12,28 @@ import ( const opBatchCheckLayerAvailability = "BatchCheckLayerAvailability" -// BatchCheckLayerAvailabilityRequest generates a request for the BatchCheckLayerAvailability operation. +// BatchCheckLayerAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the BatchCheckLayerAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchCheckLayerAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchCheckLayerAvailabilityRequest method. +// req, resp := client.BatchCheckLayerAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) BatchCheckLayerAvailabilityRequest(input *BatchCheckLayerAvailabilityInput) (req *request.Request, output *BatchCheckLayerAvailabilityOutput) { op := &request.Operation{ Name: opBatchCheckLayerAvailability, @@ -43,7 +64,28 @@ func (c *ECR) BatchCheckLayerAvailability(input *BatchCheckLayerAvailabilityInpu const opBatchDeleteImage = "BatchDeleteImage" -// BatchDeleteImageRequest generates a request for the BatchDeleteImage operation. 
+// BatchDeleteImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchDeleteImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchDeleteImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchDeleteImageRequest method. +// req, resp := client.BatchDeleteImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) BatchDeleteImageRequest(input *BatchDeleteImageInput) (req *request.Request, output *BatchDeleteImageOutput) { op := &request.Operation{ Name: opBatchDeleteImage, @@ -71,7 +113,28 @@ func (c *ECR) BatchDeleteImage(input *BatchDeleteImageInput) (*BatchDeleteImageO const opBatchGetImage = "BatchGetImage" -// BatchGetImageRequest generates a request for the BatchGetImage operation. +// BatchGetImageRequest generates a "aws/request.Request" representing the +// client's request for the BatchGetImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the BatchGetImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the BatchGetImageRequest method. +// req, resp := client.BatchGetImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) BatchGetImageRequest(input *BatchGetImageInput) (req *request.Request, output *BatchGetImageOutput) { op := &request.Operation{ Name: opBatchGetImage, @@ -99,7 +162,28 @@ func (c *ECR) BatchGetImage(input *BatchGetImageInput) (*BatchGetImageOutput, er const opCompleteLayerUpload = "CompleteLayerUpload" -// CompleteLayerUploadRequest generates a request for the CompleteLayerUpload operation. +// CompleteLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteLayerUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteLayerUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteLayerUploadRequest method. 
+// req, resp := client.CompleteLayerUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) CompleteLayerUploadRequest(input *CompleteLayerUploadInput) (req *request.Request, output *CompleteLayerUploadOutput) { op := &request.Operation{ Name: opCompleteLayerUpload, @@ -131,7 +215,28 @@ func (c *ECR) CompleteLayerUpload(input *CompleteLayerUploadInput) (*CompleteLay const opCreateRepository = "CreateRepository" -// CreateRepositoryRequest generates a request for the CreateRepository operation. +// CreateRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the CreateRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRepositoryRequest method. +// req, resp := client.CreateRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) CreateRepositoryRequest(input *CreateRepositoryInput) (req *request.Request, output *CreateRepositoryOutput) { op := &request.Operation{ Name: opCreateRepository, @@ -158,7 +263,28 @@ func (c *ECR) CreateRepository(input *CreateRepositoryInput) (*CreateRepositoryO const opDeleteRepository = "DeleteRepository" -// DeleteRepositoryRequest generates a request for the DeleteRepository operation. +// DeleteRepositoryRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepository operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRepository method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryRequest method. +// req, resp := client.DeleteRepositoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) DeleteRepositoryRequest(input *DeleteRepositoryInput) (req *request.Request, output *DeleteRepositoryOutput) { op := &request.Operation{ Name: opDeleteRepository, @@ -186,7 +312,28 @@ func (c *ECR) DeleteRepository(input *DeleteRepositoryInput) (*DeleteRepositoryO const opDeleteRepositoryPolicy = "DeleteRepositoryPolicy" -// DeleteRepositoryPolicyRequest generates a request for the DeleteRepositoryPolicy operation. +// DeleteRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRepositoryPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRepositoryPolicyRequest method. +// req, resp := client.DeleteRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) DeleteRepositoryPolicyRequest(input *DeleteRepositoryPolicyInput) (req *request.Request, output *DeleteRepositoryPolicyOutput) { op := &request.Operation{ Name: opDeleteRepositoryPolicy, @@ -213,7 +360,28 @@ func (c *ECR) DeleteRepositoryPolicy(input *DeleteRepositoryPolicyInput) (*Delet const opDescribeRepositories = "DescribeRepositories" -// DescribeRepositoriesRequest generates a request for the DescribeRepositories operation. +// DescribeRepositoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRepositories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRepositories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRepositoriesRequest method. +// req, resp := client.DescribeRepositoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) DescribeRepositoriesRequest(input *DescribeRepositoriesInput) (req *request.Request, output *DescribeRepositoriesOutput) { op := &request.Operation{ Name: opDescribeRepositories, @@ -240,7 +408,28 @@ func (c *ECR) DescribeRepositories(input *DescribeRepositoriesInput) (*DescribeR const opGetAuthorizationToken = "GetAuthorizationToken" -// GetAuthorizationTokenRequest generates a request for the GetAuthorizationToken operation. +// GetAuthorizationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetAuthorizationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAuthorizationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAuthorizationTokenRequest method. 
+// req, resp := client.GetAuthorizationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) GetAuthorizationTokenRequest(input *GetAuthorizationTokenInput) (req *request.Request, output *GetAuthorizationTokenOutput) { op := &request.Operation{ Name: opGetAuthorizationToken, @@ -274,7 +463,28 @@ func (c *ECR) GetAuthorizationToken(input *GetAuthorizationTokenInput) (*GetAuth const opGetDownloadUrlForLayer = "GetDownloadUrlForLayer" -// GetDownloadUrlForLayerRequest generates a request for the GetDownloadUrlForLayer operation. +// GetDownloadUrlForLayerRequest generates a "aws/request.Request" representing the +// client's request for the GetDownloadUrlForLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDownloadUrlForLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDownloadUrlForLayerRequest method. +// req, resp := client.GetDownloadUrlForLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) GetDownloadUrlForLayerRequest(input *GetDownloadUrlForLayerInput) (req *request.Request, output *GetDownloadUrlForLayerOutput) { op := &request.Operation{ Name: opGetDownloadUrlForLayer, @@ -305,7 +515,28 @@ func (c *ECR) GetDownloadUrlForLayer(input *GetDownloadUrlForLayerInput) (*GetDo const opGetRepositoryPolicy = "GetRepositoryPolicy" -// GetRepositoryPolicyRequest generates a request for the GetRepositoryPolicy operation. +// GetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRepositoryPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRepositoryPolicyRequest method. +// req, resp := client.GetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) GetRepositoryPolicyRequest(input *GetRepositoryPolicyInput) (req *request.Request, output *GetRepositoryPolicyOutput) { op := &request.Operation{ Name: opGetRepositoryPolicy, @@ -332,7 +563,28 @@ func (c *ECR) GetRepositoryPolicy(input *GetRepositoryPolicyInput) (*GetReposito const opInitiateLayerUpload = "InitiateLayerUpload" -// InitiateLayerUploadRequest generates a request for the InitiateLayerUpload operation. 
+// InitiateLayerUploadRequest generates a "aws/request.Request" representing the +// client's request for the InitiateLayerUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateLayerUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateLayerUploadRequest method. +// req, resp := client.InitiateLayerUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) InitiateLayerUploadRequest(input *InitiateLayerUploadInput) (req *request.Request, output *InitiateLayerUploadOutput) { op := &request.Operation{ Name: opInitiateLayerUpload, @@ -362,7 +614,28 @@ func (c *ECR) InitiateLayerUpload(input *InitiateLayerUploadInput) (*InitiateLay const opListImages = "ListImages" -// ListImagesRequest generates a request for the ListImages operation. +// ListImagesRequest generates a "aws/request.Request" representing the +// client's request for the ListImages operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListImages method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListImagesRequest method. +// req, resp := client.ListImagesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) ListImagesRequest(input *ListImagesInput) (req *request.Request, output *ListImagesOutput) { op := &request.Operation{ Name: opListImages, @@ -389,7 +662,28 @@ func (c *ECR) ListImages(input *ListImagesInput) (*ListImagesOutput, error) { const opPutImage = "PutImage" -// PutImageRequest generates a request for the PutImage operation. +// PutImageRequest generates a "aws/request.Request" representing the +// client's request for the PutImage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutImage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutImageRequest method. 
+// req, resp := client.PutImageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) PutImageRequest(input *PutImageInput) (req *request.Request, output *PutImageOutput) { op := &request.Operation{ Name: opPutImage, @@ -419,7 +713,28 @@ func (c *ECR) PutImage(input *PutImageInput) (*PutImageOutput, error) { const opSetRepositoryPolicy = "SetRepositoryPolicy" -// SetRepositoryPolicyRequest generates a request for the SetRepositoryPolicy operation. +// SetRepositoryPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetRepositoryPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetRepositoryPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetRepositoryPolicyRequest method. +// req, resp := client.SetRepositoryPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) SetRepositoryPolicyRequest(input *SetRepositoryPolicyInput) (req *request.Request, output *SetRepositoryPolicyOutput) { op := &request.Operation{ Name: opSetRepositoryPolicy, @@ -446,7 +761,28 @@ func (c *ECR) SetRepositoryPolicy(input *SetRepositoryPolicyInput) (*SetReposito const opUploadLayerPart = "UploadLayerPart" -// UploadLayerPartRequest generates a request for the UploadLayerPart operation. +// UploadLayerPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadLayerPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadLayerPart method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadLayerPartRequest method. 
+// req, resp := client.UploadLayerPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECR) UploadLayerPartRequest(input *UploadLayerPartInput) (req *request.Request, output *UploadLayerPartOutput) { op := &request.Operation{ Name: opUploadLayerPart, diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go index 0f6c6760e..53e3c57e3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon EC2 Container Registry (Amazon ECR) is a managed AWS Docker registry @@ -65,7 +65,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index 70941f1a6..21bf3c381 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -13,7 +13,28 @@ import ( const opCreateCluster = "CreateCluster" -// CreateClusterRequest generates a request for the CreateCluster operation. +// CreateClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterRequest method. +// req, resp := client.CreateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { op := &request.Operation{ Name: opCreateCluster, @@ -42,7 +63,28 @@ func (c *ECS) CreateCluster(input *CreateClusterInput) (*CreateClusterOutput, er const opCreateService = "CreateService" -// CreateServiceRequest generates a request for the CreateService operation. +// CreateServiceRequest generates a "aws/request.Request" representing the +// client's request for the CreateService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateServiceRequest method. +// req, resp := client.CreateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Request, output *CreateServiceOutput) { op := &request.Operation{ Name: opCreateService, @@ -65,6 +107,10 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // ECS spawns another instantiation of the task in the specified cluster. To // update an existing service, see UpdateService. // +// In addition to maintaining the desired count of tasks in your service, you +// can optionally run your service behind a load balancer. The load balancer +// distributes traffic across the tasks that are associated with the service. +// // You can optionally specify a deployment configuration for your service. // During a deployment (which is triggered by changing the task definition of // a service with an UpdateService operation), the service scheduler uses the @@ -113,7 +159,28 @@ func (c *ECS) CreateService(input *CreateServiceInput) (*CreateServiceOutput, er const opDeleteCluster = "DeleteCluster" -// DeleteClusterRequest generates a request for the DeleteCluster operation. +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterRequest method. +// req, resp := client.DeleteClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { op := &request.Operation{ Name: opDeleteCluster, @@ -142,7 +209,28 @@ func (c *ECS) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutput, er const opDeleteService = "DeleteService" -// DeleteServiceRequest generates a request for the DeleteService operation. +// DeleteServiceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteServiceRequest method. +// req, resp := client.DeleteServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DeleteServiceRequest(input *DeleteServiceInput) (req *request.Request, output *DeleteServiceOutput) { op := &request.Operation{ Name: opDeleteService, @@ -182,7 +270,28 @@ func (c *ECS) DeleteService(input *DeleteServiceInput) (*DeleteServiceOutput, er const opDeregisterContainerInstance = "DeregisterContainerInstance" -// DeregisterContainerInstanceRequest generates a request for the DeregisterContainerInstance operation. +// DeregisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterContainerInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterContainerInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterContainerInstanceRequest method. +// req, resp := client.DeregisterContainerInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInstanceInput) (req *request.Request, output *DeregisterContainerInstanceOutput) { op := &request.Operation{ Name: opDeregisterContainerInstance, @@ -212,8 +321,10 @@ func (c *ECS) DeregisterContainerInstanceRequest(input *DeregisterContainerInsta // but it does not terminate the EC2 instance; if you are finished using the // instance, be sure to terminate it in the Amazon EC2 console to stop billing. // -// When you terminate a container instance, it is automatically deregistered -// from your cluster. +// If you terminate a running container instance with a connected Amazon ECS +// container agent, the agent automatically deregisters the instance from your +// cluster (stopped container instances or instances with disconnected agents +// are not automatically deregistered when terminated). func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInput) (*DeregisterContainerInstanceOutput, error) { req, out := c.DeregisterContainerInstanceRequest(input) err := req.Send() @@ -222,7 +333,28 @@ func (c *ECS) DeregisterContainerInstance(input *DeregisterContainerInstanceInpu const opDeregisterTaskDefinition = "DeregisterTaskDefinition" -// DeregisterTaskDefinitionRequest generates a request for the DeregisterTaskDefinition operation. 
+// DeregisterTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterTaskDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterTaskDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterTaskDefinitionRequest method. +// req, resp := client.DeregisterTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DeregisterTaskDefinitionRequest(input *DeregisterTaskDefinitionInput) (req *request.Request, output *DeregisterTaskDefinitionOutput) { op := &request.Operation{ Name: opDeregisterTaskDefinition, @@ -258,7 +390,28 @@ func (c *ECS) DeregisterTaskDefinition(input *DeregisterTaskDefinitionInput) (*D const opDescribeClusters = "DescribeClusters" -// DescribeClustersRequest generates a request for the DescribeClusters operation. +// DescribeClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClustersRequest method. +// req, resp := client.DescribeClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { op := &request.Operation{ Name: opDescribeClusters, @@ -285,7 +438,28 @@ func (c *ECS) DescribeClusters(input *DescribeClustersInput) (*DescribeClustersO const opDescribeContainerInstances = "DescribeContainerInstances" -// DescribeContainerInstancesRequest generates a request for the DescribeContainerInstances operation. +// DescribeContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeContainerInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeContainerInstances method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeContainerInstancesRequest method. +// req, resp := client.DescribeContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DescribeContainerInstancesRequest(input *DescribeContainerInstancesInput) (req *request.Request, output *DescribeContainerInstancesOutput) { op := &request.Operation{ Name: opDescribeContainerInstances, @@ -313,7 +487,28 @@ func (c *ECS) DescribeContainerInstances(input *DescribeContainerInstancesInput) const opDescribeServices = "DescribeServices" -// DescribeServicesRequest generates a request for the DescribeServices operation. +// DescribeServicesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeServicesRequest method. +// req, resp := client.DescribeServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DescribeServicesRequest(input *DescribeServicesInput) (req *request.Request, output *DescribeServicesOutput) { op := &request.Operation{ Name: opDescribeServices, @@ -340,7 +535,28 @@ func (c *ECS) DescribeServices(input *DescribeServicesInput) (*DescribeServicesO const opDescribeTaskDefinition = "DescribeTaskDefinition" -// DescribeTaskDefinitionRequest generates a request for the DescribeTaskDefinition operation. +// DescribeTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTaskDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTaskDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTaskDefinitionRequest method. 
+// req, resp := client.DescribeTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DescribeTaskDefinitionRequest(input *DescribeTaskDefinitionInput) (req *request.Request, output *DescribeTaskDefinitionOutput) { op := &request.Operation{ Name: opDescribeTaskDefinition, @@ -372,7 +588,28 @@ func (c *ECS) DescribeTaskDefinition(input *DescribeTaskDefinitionInput) (*Descr const opDescribeTasks = "DescribeTasks" -// DescribeTasksRequest generates a request for the DescribeTasks operation. +// DescribeTasksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTasksRequest method. +// req, resp := client.DescribeTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DescribeTasksRequest(input *DescribeTasksInput) (req *request.Request, output *DescribeTasksOutput) { op := &request.Operation{ Name: opDescribeTasks, @@ -399,7 +636,28 @@ func (c *ECS) DescribeTasks(input *DescribeTasksInput) (*DescribeTasksOutput, er const opDiscoverPollEndpoint = "DiscoverPollEndpoint" -// DiscoverPollEndpointRequest generates a request for the DiscoverPollEndpoint operation. +// DiscoverPollEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DiscoverPollEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DiscoverPollEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DiscoverPollEndpointRequest method. +// req, resp := client.DiscoverPollEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req *request.Request, output *DiscoverPollEndpointOutput) { op := &request.Operation{ Name: opDiscoverPollEndpoint, @@ -420,8 +678,8 @@ func (c *ECS) DiscoverPollEndpointRequest(input *DiscoverPollEndpointInput) (req // This action is only used by the Amazon EC2 Container Service agent, and it // is not intended for use outside of the agent. // -// Returns an endpoint for the Amazon EC2 Container Service agent to poll for -// updates. +// Returns an endpoint for the Amazon EC2 Container Service agent to poll +// for updates. 
func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverPollEndpointOutput, error) { req, out := c.DiscoverPollEndpointRequest(input) err := req.Send() @@ -430,7 +688,28 @@ func (c *ECS) DiscoverPollEndpoint(input *DiscoverPollEndpointInput) (*DiscoverP const opListClusters = "ListClusters" -// ListClustersRequest generates a request for the ListClusters operation. +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListClustersRequest method. +// req, resp := client.ListClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { op := &request.Operation{ Name: opListClusters, @@ -461,6 +740,23 @@ func (c *ECS) ListClusters(input *ListClustersInput) (*ListClustersOutput, error return out, err } +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListClustersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -471,7 +767,28 @@ func (c *ECS) ListClustersPages(input *ListClustersInput, fn func(p *ListCluster const opListContainerInstances = "ListContainerInstances" -// ListContainerInstancesRequest generates a request for the ListContainerInstances operation. +// ListContainerInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListContainerInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListContainerInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ListContainerInstancesRequest method. +// req, resp := client.ListContainerInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListContainerInstancesRequest(input *ListContainerInstancesInput) (req *request.Request, output *ListContainerInstancesOutput) { op := &request.Operation{ Name: opListContainerInstances, @@ -502,6 +819,23 @@ func (c *ECS) ListContainerInstances(input *ListContainerInstancesInput) (*ListC return out, err } +// ListContainerInstancesPages iterates over the pages of a ListContainerInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListContainerInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListContainerInstances operation. +// pageNum := 0 +// err := client.ListContainerInstancesPages(params, +// func(page *ListContainerInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn func(p *ListContainerInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListContainerInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -512,7 +846,28 @@ func (c *ECS) ListContainerInstancesPages(input *ListContainerInstancesInput, fn const opListServices = "ListServices" -// ListServicesRequest generates a request for the ListServices operation. +// ListServicesRequest generates a "aws/request.Request" representing the +// client's request for the ListServices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListServices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListServicesRequest method. +// req, resp := client.ListServicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListServicesRequest(input *ListServicesInput) (req *request.Request, output *ListServicesOutput) { op := &request.Operation{ Name: opListServices, @@ -543,6 +898,23 @@ func (c *ECS) ListServices(input *ListServicesInput) (*ListServicesOutput, error return out, err } +// ListServicesPages iterates over the pages of a ListServices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServices operation. 
+// pageNum := 0 +// err := client.ListServicesPages(params, +// func(page *ListServicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(p *ListServicesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListServicesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -553,7 +925,28 @@ func (c *ECS) ListServicesPages(input *ListServicesInput, fn func(p *ListService const opListTaskDefinitionFamilies = "ListTaskDefinitionFamilies" -// ListTaskDefinitionFamiliesRequest generates a request for the ListTaskDefinitionFamilies operation. +// ListTaskDefinitionFamiliesRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitionFamilies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTaskDefinitionFamilies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTaskDefinitionFamiliesRequest method. +// req, resp := client.ListTaskDefinitionFamiliesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamiliesInput) (req *request.Request, output *ListTaskDefinitionFamiliesOutput) { op := &request.Operation{ Name: opListTaskDefinitionFamilies, @@ -579,13 +972,34 @@ func (c *ECS) ListTaskDefinitionFamiliesRequest(input *ListTaskDefinitionFamilie // Returns a list of task definition families that are registered to your account // (which may include task definition families that no longer have any ACTIVE -// task definitions). You can filter the results with the familyPrefix parameter. +// task definition revisions). +// +// You can filter out task definition families that do not contain any ACTIVE +// task definition revisions by setting the status parameter to ACTIVE. You +// can also filter the results with the familyPrefix parameter. func (c *ECS) ListTaskDefinitionFamilies(input *ListTaskDefinitionFamiliesInput) (*ListTaskDefinitionFamiliesOutput, error) { req, out := c.ListTaskDefinitionFamiliesRequest(input) err := req.Send() return out, err } +// ListTaskDefinitionFamiliesPages iterates over the pages of a ListTaskDefinitionFamilies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitionFamilies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitionFamilies operation. 
+// pageNum := 0 +// err := client.ListTaskDefinitionFamiliesPages(params, +// func(page *ListTaskDefinitionFamiliesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesInput, fn func(p *ListTaskDefinitionFamiliesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListTaskDefinitionFamiliesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -596,7 +1010,28 @@ func (c *ECS) ListTaskDefinitionFamiliesPages(input *ListTaskDefinitionFamiliesI const opListTaskDefinitions = "ListTaskDefinitions" -// ListTaskDefinitionsRequest generates a request for the ListTaskDefinitions operation. +// ListTaskDefinitionsRequest generates a "aws/request.Request" representing the +// client's request for the ListTaskDefinitions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTaskDefinitions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTaskDefinitionsRequest method. +// req, resp := client.ListTaskDefinitionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListTaskDefinitionsRequest(input *ListTaskDefinitionsInput) (req *request.Request, output *ListTaskDefinitionsOutput) { op := &request.Operation{ Name: opListTaskDefinitions, @@ -629,6 +1064,23 @@ func (c *ECS) ListTaskDefinitions(input *ListTaskDefinitionsInput) (*ListTaskDef return out, err } +// ListTaskDefinitionsPages iterates over the pages of a ListTaskDefinitions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTaskDefinitions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTaskDefinitions operation. +// pageNum := 0 +// err := client.ListTaskDefinitionsPages(params, +// func(page *ListTaskDefinitionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func(p *ListTaskDefinitionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListTaskDefinitionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -639,7 +1091,28 @@ func (c *ECS) ListTaskDefinitionsPages(input *ListTaskDefinitionsInput, fn func( const opListTasks = "ListTasks" -// ListTasksRequest generates a request for the ListTasks operation. +// ListTasksRequest generates a "aws/request.Request" representing the +// client's request for the ListTasks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
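As a minimal sketch (not part of the vendored SDK source), the snippet below shows how the Request/Send pattern and the Pages paginator described in these generated comments are typically combined in application code. The region name, cluster name, and error handling are placeholder assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Placeholder region and cluster; adjust for your environment.
	svc := ecs.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))
	params := &ecs.ListTasksInput{Cluster: aws.String("default")}

	// Request-object form: build the request, optionally customize it, then Send.
	req, resp := svc.ListTasksRequest(params)
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println("first page:", len(resp.TaskArns), "task ARNs")

	// Paginator form: the SDK keeps issuing requests until the callback
	// returns false or there are no further pages.
	err := svc.ListTasksPages(params, func(page *ecs.ListTasksOutput, lastPage bool) bool {
		fmt.Println("page with", len(page.TaskArns), "task ARNs")
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}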
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTasks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTasksRequest method. +// req, resp := client.ListTasksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, output *ListTasksOutput) { op := &request.Operation{ Name: opListTasks, @@ -666,12 +1139,32 @@ func (c *ECS) ListTasksRequest(input *ListTasksInput) (req *request.Request, out // Returns a list of tasks for a specified cluster. You can filter the results // by family name, by a particular container instance, or by the desired status // of the task with the family, containerInstance, and desiredStatus parameters. +// +// Recently-stopped tasks might appear in the returned results. Currently, +// stopped tasks appear in the returned results for at least one hour. func (c *ECS) ListTasks(input *ListTasksInput) (*ListTasksOutput, error) { req, out := c.ListTasksRequest(input) err := req.Send() return out, err } +// ListTasksPages iterates over the pages of a ListTasks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTasks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTasks operation. +// pageNum := 0 +// err := client.ListTasksPages(params, +// func(page *ListTasksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(p *ListTasksOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListTasksRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -682,7 +1175,28 @@ func (c *ECS) ListTasksPages(input *ListTasksInput, fn func(p *ListTasksOutput, const opRegisterContainerInstance = "RegisterContainerInstance" -// RegisterContainerInstanceRequest generates a request for the RegisterContainerInstance operation. +// RegisterContainerInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterContainerInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterContainerInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterContainerInstanceRequest method. 
+// req, resp := client.RegisterContainerInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceInput) (req *request.Request, output *RegisterContainerInstanceOutput) { op := &request.Operation{ Name: opRegisterContainerInstance, @@ -703,7 +1217,7 @@ func (c *ECS) RegisterContainerInstanceRequest(input *RegisterContainerInstanceI // This action is only used by the Amazon EC2 Container Service agent, and it // is not intended for use outside of the agent. // -// Registers an EC2 instance into the specified cluster. This instance becomes +// Registers an EC2 instance into the specified cluster. This instance becomes // available to place containers on. func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) (*RegisterContainerInstanceOutput, error) { req, out := c.RegisterContainerInstanceRequest(input) @@ -713,7 +1227,28 @@ func (c *ECS) RegisterContainerInstance(input *RegisterContainerInstanceInput) ( const opRegisterTaskDefinition = "RegisterTaskDefinition" -// RegisterTaskDefinitionRequest generates a request for the RegisterTaskDefinition operation. +// RegisterTaskDefinitionRequest generates a "aws/request.Request" representing the +// client's request for the RegisterTaskDefinition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterTaskDefinition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterTaskDefinitionRequest method. +// req, resp := client.RegisterTaskDefinitionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) (req *request.Request, output *RegisterTaskDefinitionOutput) { op := &request.Operation{ Name: opRegisterTaskDefinition, @@ -736,6 +1271,13 @@ func (c *ECS) RegisterTaskDefinitionRequest(input *RegisterTaskDefinitionInput) // parameter. For more information about task definition parameters and defaults, // see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) // in the Amazon EC2 Container Service Developer Guide. +// +// You may also specify an IAM role for your task with the taskRoleArn parameter. +// When you specify an IAM role for a task, its containers can then use the +// latest versions of the AWS CLI or SDKs to make API requests to the AWS services +// that are specified in the IAM policy associated with the role. For more information, +// see IAM Roles for Tasks (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) +// in the Amazon EC2 Container Service Developer Guide. 
func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*RegisterTaskDefinitionOutput, error) { req, out := c.RegisterTaskDefinitionRequest(input) err := req.Send() @@ -744,7 +1286,28 @@ func (c *ECS) RegisterTaskDefinition(input *RegisterTaskDefinitionInput) (*Regis const opRunTask = "RunTask" -// RunTaskRequest generates a request for the RunTask operation. +// RunTaskRequest generates a "aws/request.Request" representing the +// client's request for the RunTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunTaskRequest method. +// req, resp := client.RunTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) RunTaskRequest(input *RunTaskInput) (req *request.Request, output *RunTaskOutput) { op := &request.Operation{ Name: opRunTask, @@ -775,7 +1338,28 @@ func (c *ECS) RunTask(input *RunTaskInput) (*RunTaskOutput, error) { const opStartTask = "StartTask" -// StartTaskRequest generates a request for the StartTask operation. +// StartTaskRequest generates a "aws/request.Request" representing the +// client's request for the StartTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartTask method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartTaskRequest method. +// req, resp := client.StartTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) StartTaskRequest(input *StartTaskInput) (req *request.Request, output *StartTaskOutput) { op := &request.Operation{ Name: opStartTask, @@ -806,7 +1390,28 @@ func (c *ECS) StartTask(input *StartTaskInput) (*StartTaskOutput, error) { const opStopTask = "StopTask" -// StopTaskRequest generates a request for the StopTask operation. +// StopTaskRequest generates a "aws/request.Request" representing the +// client's request for the StopTask operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopTask method directly +// instead. 
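The taskRoleArn and RunTask behaviour described above can be combined roughly as follows. This is an illustrative sketch only; the family name, role ARN, container image, and cluster name are assumptions rather than values taken from this patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	svc := ecs.New(session.New(), aws.NewConfig().WithRegion("us-east-1"))

	// Register a one-container task definition; TaskRoleArn grants the
	// containers the permissions attached to the (placeholder) IAM role.
	regOut, err := svc.RegisterTaskDefinition(&ecs.RegisterTaskDefinitionInput{
		Family:      aws.String("example-web"),
		TaskRoleArn: aws.String("arn:aws:iam::123456789012:role/example-task-role"),
		ContainerDefinitions: []*ecs.ContainerDefinition{
			{
				Name:      aws.String("web"),
				Image:     aws.String("nginx"),
				Memory:    aws.Int64(128),
				Essential: aws.Bool(true),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	taskDef := aws.StringValue(regOut.TaskDefinition.TaskDefinitionArn)

	// Run one copy of the task on the default cluster; startedBy tags the
	// task so it can be found later with ListTasks.
	runOut, err := svc.RunTask(&ecs.RunTaskInput{
		Cluster:        aws.String("default"),
		TaskDefinition: aws.String(taskDef),
		Count:          aws.Int64(1),
		StartedBy:      aws.String("example-batch-job"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("started tasks:", len(runOut.Tasks))
}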
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopTaskRequest method. +// req, resp := client.StopTaskRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) StopTaskRequest(input *StopTaskInput) (req *request.Request, output *StopTaskOutput) { op := &request.Operation{ Name: opStopTask, @@ -839,7 +1444,28 @@ func (c *ECS) StopTask(input *StopTaskInput) (*StopTaskOutput, error) { const opSubmitContainerStateChange = "SubmitContainerStateChange" -// SubmitContainerStateChangeRequest generates a request for the SubmitContainerStateChange operation. +// SubmitContainerStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitContainerStateChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubmitContainerStateChange method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubmitContainerStateChangeRequest method. +// req, resp := client.SubmitContainerStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChangeInput) (req *request.Request, output *SubmitContainerStateChangeOutput) { op := &request.Operation{ Name: opSubmitContainerStateChange, @@ -860,7 +1486,7 @@ func (c *ECS) SubmitContainerStateChangeRequest(input *SubmitContainerStateChang // This action is only used by the Amazon EC2 Container Service agent, and it // is not intended for use outside of the agent. // -// Sent to acknowledge that a container changed states. +// Sent to acknowledge that a container changed states. func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) (*SubmitContainerStateChangeOutput, error) { req, out := c.SubmitContainerStateChangeRequest(input) err := req.Send() @@ -869,7 +1495,28 @@ func (c *ECS) SubmitContainerStateChange(input *SubmitContainerStateChangeInput) const opSubmitTaskStateChange = "SubmitTaskStateChange" -// SubmitTaskStateChangeRequest generates a request for the SubmitTaskStateChange operation. +// SubmitTaskStateChangeRequest generates a "aws/request.Request" representing the +// client's request for the SubmitTaskStateChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SubmitTaskStateChange method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the SubmitTaskStateChangeRequest method. +// req, resp := client.SubmitTaskStateChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (req *request.Request, output *SubmitTaskStateChangeOutput) { op := &request.Operation{ Name: opSubmitTaskStateChange, @@ -890,7 +1537,7 @@ func (c *ECS) SubmitTaskStateChangeRequest(input *SubmitTaskStateChangeInput) (r // This action is only used by the Amazon EC2 Container Service agent, and it // is not intended for use outside of the agent. // -// Sent to acknowledge that a task changed states. +// Sent to acknowledge that a task changed states. func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitTaskStateChangeOutput, error) { req, out := c.SubmitTaskStateChangeRequest(input) err := req.Send() @@ -899,7 +1546,28 @@ func (c *ECS) SubmitTaskStateChange(input *SubmitTaskStateChangeInput) (*SubmitT const opUpdateContainerAgent = "UpdateContainerAgent" -// UpdateContainerAgentRequest generates a request for the UpdateContainerAgent operation. +// UpdateContainerAgentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContainerAgent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateContainerAgent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateContainerAgentRequest method. +// req, resp := client.UpdateContainerAgentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req *request.Request, output *UpdateContainerAgentOutput) { op := &request.Operation{ Name: opUpdateContainerAgent, @@ -923,7 +1591,7 @@ func (c *ECS) UpdateContainerAgentRequest(input *UpdateContainerAgentInput) (req // differs depending on whether your container instance was launched with the // Amazon ECS-optimized AMI or another operating system. // -// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux +// UpdateContainerAgent requires the Amazon ECS-optimized AMI or Amazon Linux // with the ecs-init service installed and running. For help updating the Amazon // ECS container agent on other operating systems, see Manually Updating the // Amazon ECS Container Agent (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-update.html#manually_update_agent) @@ -936,7 +1604,28 @@ func (c *ECS) UpdateContainerAgent(input *UpdateContainerAgentInput) (*UpdateCon const opUpdateService = "UpdateService" -// UpdateServiceRequest generates a request for the UpdateService operation. +// UpdateServiceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateService operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateService method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateServiceRequest method. +// req, resp := client.UpdateServiceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ECS) UpdateServiceRequest(input *UpdateServiceInput) (req *request.Request, output *UpdateServiceOutput) { op := &request.Operation{ Name: opUpdateService, @@ -1062,7 +1751,7 @@ type Cluster struct { // The Amazon Resource Name (ARN) that identifies the cluster. The ARN contains // the arn:aws:ecs namespace, followed by the region of the cluster, the AWS // account ID of the cluster owner, the cluster namespace, and then the cluster - // name. For example, arn:aws:ecs:region:012345678910:cluster/test. + // name. For example, arn:aws:ecs:region:012345678910:cluster/test .. ClusterArn *string `locationName:"clusterArn" type:"string"` // A user-generated string that you use to identify your cluster. @@ -1175,11 +1864,13 @@ type ContainerDefinition struct { // 2 (including null), the behavior varies based on your Amazon ECS container // agent version: // - // Agent versions less than or equal to 1.1.0: Null and zero CPU values are - // passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU - // values of 1 are passed to Docker as 1, which the Linux kernel converts to - // 2 CPU shares. Agent versions greater than or equal to 1.2.0: Null, zero, - // and CPU values of 1 are passed to Docker as 2. + // Agent versions less than or equal to 1.1.0: Null and zero CPU values + // are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. + // CPU values of 1 are passed to Docker as 1, which the Linux kernel converts + // to 2 CPU shares. + // + // Agent versions greater than or equal to 1.2.0: Null, zero, and CPU values + // of 1 are passed to Docker as 2. Cpu *int64 `locationName:"cpu" type:"integer"` // When this parameter is true, networking is disabled within the container. @@ -1219,7 +1910,7 @@ type ContainerDefinition struct { // with the ECS_SELINUX_CAPABLE=true or ECS_APPARMOR_CAPABLE=true environment // variables before containers placed on that instance can use these security // options. For more information, see Amazon ECS Container Agent Configuration - // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) // in the Amazon EC2 Container Service Developer Guide. DockerSecurityOptions []*string `locationName:"dockerSecurityOptions" type:"list"` @@ -1244,12 +1935,18 @@ type ContainerDefinition struct { // information, such as credential data. Environment []*KeyValuePair `locationName:"environment" type:"list"` - // If the essential parameter of a container is marked as true, the failure - // of that container stops the task. If the essential parameter of a container - // is marked as false, then its failure does not affect the rest of the containers - // in a task. 
If this parameter is omitted, a container is assumed to be essential. + // If the essential parameter of a container is marked as true, and that container + // fails or stops for any reason, all other containers that are part of the + // task are stopped. If the essential parameter of a container is marked as + // false, then its failure does not affect the rest of the containers in a task. + // If this parameter is omitted, a container is assumed to be essential. // - // All tasks must have at least one essential container. + // All tasks must have at least one essential container. If you have an application + // that is composed of multiple containers, you should group containers that + // are used for a common purpose into components, and separate the different + // components into multiple task definitions. For more information, see Application + // Architecture (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/application_architecture.html) + // in the Amazon EC2 Container Service Developer Guide. Essential *bool `locationName:"essential" type:"boolean"` // A list of hostnames and IP address mappings to append to the /etc/hosts file @@ -1267,18 +1964,21 @@ type ContainerDefinition struct { // The image used to start a container. This string is passed directly to the // Docker daemon. Images in the Docker Hub registry are available by default. - // Other repositories are specified with repository-url/image:tag. Up to 255 + // Other repositories are specified with repository-url/image:tag . Up to 255 // letters (uppercase and lowercase), numbers, hyphens, underscores, colons, // periods, forward slashes, and number signs are allowed. This parameter maps // to Image in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) // and the IMAGE parameter of docker run (https://docs.docker.com/reference/commandline/run/). // - // Images in official repositories on Docker Hub use a single name (for example, - // ubuntu or mongo). Images in other repositories on Docker Hub are qualified - // with an organization name (for example, amazon/amazon-ecs-agent). Images - // in other online repositories are qualified further by a domain name (for - // example, quay.io/assemblyline/ubuntu). + // Images in official repositories on Docker Hub use a single name (for example, + // ubuntu or mongo). + // + // Images in other repositories on Docker Hub are qualified with an organization + // name (for example, amazon/amazon-ecs-agent). + // + // Images in other online repositories are qualified further by a domain + // name (for example, quay.io/assemblyline/ubuntu). Image *string `locationName:"image" type:"string"` // The link parameter allows containers to communicate with each other without @@ -1289,7 +1989,7 @@ type ContainerDefinition struct { // containers, see https://docs.docker.com/userguide/dockerlinks/ (https://docs.docker.com/userguide/dockerlinks/). // This parameter maps to Links in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) - // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). + // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). 
// // Containers that are collocated on a single container instance may be able // to communicate with each other without requiring links or host port mappings. @@ -1301,17 +2001,30 @@ type ContainerDefinition struct { // to LogConfig in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) // and the --log-driver option to docker run (https://docs.docker.com/reference/commandline/run/). - // Valid log drivers are displayed in the LogConfiguration data type. This parameter - // requires version 1.18 of the Docker Remote API or greater on your container - // instance. To check the Docker Remote API version on your container instance, - // log into your container instance and run the following command: sudo docker - // version | grep "Server API version" + // By default, containers use the same logging driver that the Docker daemon + // uses; however the container may use a different logging driver than the Docker + // daemon by specifying a log driver with this parameter in the container definition. + // To use a different logging driver for a container, the log system must be + // configured properly on the container instance (or on a different log server + // for remote logging options). For more information on the options for different + // supported log drivers, see Configure logging drivers (https://docs.docker.com/engine/admin/logging/overview/) + // in the Docker documentation. + // + // Amazon ECS currently supports a subset of the logging drivers available + // to the Docker daemon (shown in the LogConfiguration data type). Currently + // unsupported log drivers may be available in future releases of the Amazon + // ECS container agent. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" // // The Amazon ECS container agent running on a container instance must register // the logging drivers available on that instance with the ECS_AVAILABLE_LOGGING_DRIVERS // environment variable before containers placed on that instance can use these // log configuration options. For more information, see Amazon ECS Container - // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/developerguide/ecs-agent-config.html) + // Agent Configuration (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-agent-config.html) // in the Amazon EC2 Container Service Developer Guide. LogConfiguration *LogConfiguration `locationName:"logConfiguration" type:"structure"` @@ -1463,7 +2176,8 @@ type ContainerInstance struct { // The Amazon Resource Name (ARN) of the container instance. The ARN contains // the arn:aws:ecs namespace, followed by the region of the container instance, // the AWS account ID of the container instance owner, the container-instance - // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + // namespace, and then the container instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID + // . ContainerInstanceArn *string `locationName:"containerInstanceArn" type:"string"` // The EC2 instance ID of the container instance. 
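The cpu, essential, image, and logConfiguration parameters documented in the comments above come together in a container definition roughly like the one below. This is a speculative sketch: the image, resource values, and awslogs options are assumptions, and the chosen log driver must be registered as available on the container instance as described above.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// A single container definition as it might appear in a RegisterTaskDefinition
	// call. All values here are illustrative placeholders.
	def := &ecs.ContainerDefinition{
		Name:      aws.String("web"),
		Image:     aws.String("amazon/amazon-ecs-sample"),
		Cpu:       aws.Int64(256),
		Memory:    aws.Int64(512),
		Essential: aws.Bool(true), // if this container stops, all containers in the task stop
		LogConfiguration: &ecs.LogConfiguration{
			LogDriver: aws.String("awslogs"),
			Options: map[string]*string{
				"awslogs-group":  aws.String("/ecs/example-web"),
				"awslogs-region": aws.String("us-east-1"),
			},
		},
	}
	fmt.Println(def)
}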
@@ -1590,10 +2304,18 @@ type CreateServiceInput struct { // access from the load balancer. LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` - // The name or full Amazon Resource Name (ARN) of the IAM role that allows your - // Amazon ECS container agent to make calls to your load balancer on your behalf. - // This parameter is only required if you are using a load balancer with your - // service. + // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon + // ECS to make calls to your load balancer on your behalf. This parameter is + // required if you are using a load balancer with your service. If you specify + // the role parameter, you must also specify a load balancer object with the + // loadBalancers parameter. + // + // If your specified role has a path other than /, then you must either specify + // the full role ARN (this is recommended) or prefix the role name with the + // path. For example, if a role with the name bar has a path of /foo/ then you + // would specify /foo/bar as the role name. For more information, see Friendly + // Names and Paths (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html#identifiers-friendly-names) + // in the IAM User Guide. Role *string `locationName:"role" type:"string"` // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, @@ -1836,7 +2558,8 @@ type DeregisterContainerInstanceInput struct { // instance to deregister. The ARN contains the arn:aws:ecs namespace, followed // by the region of the container instance, the AWS account ID of the container // instance owner, the container-instance namespace, and then the container - // instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + // instance ID. For example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID + // . ContainerInstance *string `locationName:"containerInstance" type:"string" required:"true"` // Forces the deregistration of the container instance. If you have tasks running @@ -1942,8 +2665,9 @@ func (s DeregisterTaskDefinitionOutput) GoString() string { type DescribeClustersInput struct { _ struct{} `type:"structure"` - // A space-separated list of cluster names or full cluster Amazon Resource Name - // (ARN) entries. If you do not specify a cluster, the default cluster is assumed. + // A space-separated list of up to 100 cluster names or full cluster Amazon + // Resource Name (ARN) entries. If you do not specify a cluster, the default + // cluster is assumed. Clusters []*string `locationName:"clusters" type:"list"` } @@ -2201,7 +2925,8 @@ type DiscoverPollEndpointInput struct { // instance. The ARN contains the arn:aws:ecs namespace, followed by the region // of the container instance, the AWS account ID of the container instance owner, // the container-instance namespace, and then the container instance ID. For - // example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID. + // example, arn:aws:ecs:region:aws_account_id:container-instance/container_instance_ID + // . ContainerInstance *string `locationName:"containerInstance" type:"string"` } @@ -2357,6 +3082,9 @@ type ListClustersInput struct { // where maxResults was used and the results exceeded the value of that parameter. // Pagination continues from the end of the previous results that returned the // nextToken value. This value is null when there are no more results to return. 
+ // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` } @@ -2417,6 +3145,9 @@ type ListContainerInstancesInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. This value is null when there are no more results // to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` } @@ -2475,6 +3206,9 @@ type ListServicesInput struct { // where maxResults was used and the results exceeded the value of that parameter. // Pagination continues from the end of the previous results that returned the // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` } @@ -2535,7 +3269,19 @@ type ListTaskDefinitionFamiliesInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. This value is null when there are no more results // to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` + + // The task definition family status with which to filter the ListTaskDefinitionFamilies // results. By default, both ACTIVE and INACTIVE task definition families are + // listed. If this parameter is set to ACTIVE, only task definition families + // that have an ACTIVE task definition revision are returned. If this parameter + // is set to INACTIVE, only task definition families that do not have any ACTIVE + // task definition revisions are returned. If you paginate the resulting output, + // be sure to keep the status value constant in each subsequent request. + Status *string `locationName:"status" type:"string" enum:"TaskDefinitionFamilyStatus"` } // String returns the string representation @@ -2594,6 +3340,9 @@ type ListTaskDefinitionsInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. This value is null when there are no more results // to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` // The order in which to sort the results. Valid values are ASC and DESC. By @@ -2662,7 +3411,12 @@ type ListTasksInput struct { // The task status with which to filter the ListTasks results. Specifying a // desiredStatus of STOPPED limits the results to tasks that are in the STOPPED // status, which can be useful for debugging tasks that are not starting properly - // or have died or finished. The default status filter is RUNNING. + // or have died or finished. The default status filter is RUNNING, + // which shows tasks that ECS has set the desired status to RUNNING.
+ // + // Although you can filter results based on a desired status of PENDING, this + // will not return any results because ECS never sets the desired status of + // a task to that value (only a task's lastStatus may have a value of PENDING). DesiredStatus *string `locationName:"desiredStatus" type:"string" enum:"DesiredStatus"` // The name of the family with which to filter the ListTasks results. Specifying @@ -2682,6 +3436,9 @@ type ListTasksInput struct { // where maxResults was used and the results exceeded the value of that parameter. // Pagination continues from the end of the previous results that returned the // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` // The name of the service with which to filter the ListTasks results. Specifying @@ -2758,11 +3515,22 @@ func (s LoadBalancer) GoString() string { type LogConfiguration struct { _ struct{} `type:"structure"` - // The log driver to use for the container. This parameter requires version - // 1.18 of the Docker Remote API or greater on your container instance. To check - // the Docker Remote API version on your container instance, log into your container - // instance and run the following command: sudo docker version | grep "Server - // API version" + // The log driver to use for the container. The valid values listed for this + // parameter are log drivers that the Amazon ECS container agent can communicate + // with by default. + // + // If you have a custom driver that is not listed above that you would like + // to work with the Amazon ECS container agent, you can fork the Amazon ECS + // container agent project that is available on GitHub (https://github.com/aws/amazon-ecs-agent) + // and customize it to work with that driver. We encourage you to submit pull + // requests for changes that you would like to have included. However, Amazon + // Web Services does not currently provide support for running modified copies + // of this software. + // + // This parameter requires version 1.18 of the Docker Remote API or greater + // on your container instance. To check the Docker Remote API version on your + // container instance, log into your container instance and run the following + // command: sudo docker version | grep "Server API version" LogDriver *string `locationName:"logDriver" type:"string" required:"true" enum:"LogDriver"` // The configuration options to send to the log driver. This parameter requires @@ -2863,7 +3631,9 @@ type PortMapping struct { // The port number on the container that is bound to the user-specified or automatically // assigned host port. If you specify a container port and not a host port, // your container automatically receives a host port in the ephemeral port range - // (for more information, see hostPort). + // (for more information, see hostPort). Port mappings that are automatically + // assigned in this way do not count toward the 100 reserved ports limit of + // a container instance. ContainerPort *int64 `locationName:"containerPort" type:"integer"` // The port number on the container instance to reserve for your container. 
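A short, hedged illustration of the containerPort behaviour described above (the hostPort reservation rules continue in the next hunk): one mapping pins an explicit host port, the other omits hostPort so the container instance assigns one from the ephemeral range. All values are placeholders.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

func main() {
	// Port mappings for a container definition. Omitting HostPort lets the
	// container instance pick one from the ephemeral range; dynamically
	// assigned ports do not count against the instance's reserved-port limit.
	portMappings := []*ecs.PortMapping{
		{
			ContainerPort: aws.Int64(80),
			HostPort:      aws.Int64(8080), // explicitly reserved on the instance
			Protocol:      aws.String("tcp"),
		},
		{
			ContainerPort: aws.Int64(443), // host port assigned dynamically
			Protocol:      aws.String("tcp"),
		},
	}
	for _, pm := range portMappings {
		// A host port of 0 here simply means none was specified in the mapping.
		fmt.Printf("container port %d -> host port %d\n",
			aws.Int64Value(pm.ContainerPort), aws.Int64Value(pm.HostPort))
	}
}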
@@ -2885,8 +3655,9 @@ type PortMapping struct { // specified in a running task is also reserved while the task is running (after // a task stops, the host port is released).The current reserved ports are displayed // in the remainingResources of DescribeContainerInstances output, and a container - // instance may have up to 50 reserved ports at a time, including the default - // reserved ports (automatically assigned ports do not count toward this limit). + // instance may have up to 100 reserved ports at a time, including the default + // reserved ports (automatically assigned ports do not count toward the 100 + // reserved ports limit). HostPort *int64 `locationName:"hostPort" type:"integer"` // The protocol used for the port mapping. Valid values are tcp and udp. The @@ -2997,6 +3768,11 @@ type RegisterTaskDefinitionInput struct { // hyphens, and underscores are allowed. Family *string `locationName:"family" type:"string" required:"true"` + // The Amazon Resource Name (ARN) of the IAM role that containers in this task + // can assume. All containers in this task are granted the permissions that + // are specified in this role. + TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` + // A list of volume definitions in JSON format that containers in your task // may use. Volumes []*Volume `locationName:"volumes" type:"list"` @@ -3120,7 +3896,8 @@ type RunTaskInput struct { // trigger a task to run a batch process job, you could apply a unique identifier // for that job to your task with the startedBy parameter. You can then identify // which tasks belong to that job by filtering the results of a ListTasks call - // with the startedBy value. + // with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. // // If a task is started by an Amazon ECS service, then the startedBy parameter // contains the deployment ID of the service that starts it. @@ -3180,9 +3957,12 @@ func (s RunTaskOutput) GoString() string { type Service struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the of the cluster that hosts the service. + // The Amazon Resource Name (ARN) of the cluster that hosts the service. ClusterArn *string `locationName:"clusterArn" type:"string"` + // The Unix time in seconds and milliseconds when the service was created. + CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` + // Optional deployment parameters that control how many tasks run during the // deployment and the ordering of stopping and starting tasks. DeploymentConfiguration *DeploymentConfiguration `locationName:"deploymentConfiguration" type:"structure"` @@ -3218,7 +3998,7 @@ type Service struct { // The Amazon Resource Name (ARN) that identifies the service. The ARN contains // the arn:aws:ecs namespace, followed by the region of the service, the AWS // account ID of the service owner, the service namespace, and then the service - // name. For example, arn:aws:ecs:region:012345678910:service/my-service. + // name. For example, arn:aws:ecs:region:012345678910:service/my-service . ServiceArn *string `locationName:"serviceArn" type:"string"` // The name of your service. Up to 255 letters (uppercase and lowercase), numbers, @@ -3300,7 +4080,8 @@ type StartTaskInput struct { // trigger a task to run a batch process job, you could apply a unique identifier // for that job to your task with the startedBy parameter. 
You can then identify // which tasks belong to that job by filtering the results of a ListTasks call - // with the startedBy value. + // with the startedBy value. Up to 36 letters (uppercase and lowercase), numbers, + // hyphens, and underscores are allowed. // // If a task is started by an Amazon ECS service, then the startedBy parameter // contains the deployment ID of the service that starts it. @@ -3520,7 +4301,7 @@ func (s SubmitTaskStateChangeOutput) GoString() string { type Task struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the of the cluster that hosts the task. + // The Amazon Resource Name (ARN) of the cluster that hosts the task. ClusterArn *string `locationName:"clusterArn" type:"string"` // The Amazon Resource Name (ARN) of the container instances that host the task. @@ -3561,8 +4342,7 @@ type Task struct { // The Amazon Resource Name (ARN) of the task. TaskArn *string `locationName:"taskArn" type:"string"` - // The Amazon Resource Name (ARN) of the of the task definition that creates - // the task. + // The Amazon Resource Name (ARN) of the task definition that creates the task. TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` } @@ -3582,7 +4362,7 @@ type TaskDefinition struct { // A list of container definitions in JSON format that describe the different // containers that make up your task. For more information about container definition - // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonECS/latest/developerguidetask_defintions.html) // in the Amazon EC2 Container Service Developer Guide. ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"` @@ -3602,11 +4382,16 @@ type TaskDefinition struct { // The status of the task definition. Status *string `locationName:"status" type:"string" enum:"TaskDefinitionStatus"` - // The full Amazon Resource Name (ARN) of the of the task definition. + // The full Amazon Resource Name (ARN) of the task definition. TaskDefinitionArn *string `locationName:"taskDefinitionArn" type:"string"` + // The Amazon Resource Name (ARN) of the IAM role that containers in this task + // can assume. All containers in this task are granted the permissions that + // are specified in this role. + TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` + // The list of volumes in a task. For more information about volume definition - // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonECS/latest/developerguidetask_defintions.html) // in the Amazon EC2 Container Service Developer Guide. Volumes []*Volume `locationName:"volumes" type:"list"` } @@ -3627,6 +4412,11 @@ type TaskOverride struct { // One or more container overrides sent to a task. ContainerOverrides []*ContainerOverride `locationName:"containerOverrides" type:"list"` + + // The Amazon Resource Name (ARN) of the IAM role that containers in this task + // can assume. All containers in this task are granted the permissions that + // are specified in this role. 
+ TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` } // String returns the string representation @@ -3915,6 +4705,8 @@ const ( LogDriverGelf = "gelf" // @enum LogDriver LogDriverFluentd = "fluentd" + // @enum LogDriver + LogDriverAwslogs = "awslogs" ) const ( @@ -3924,6 +4716,15 @@ const ( SortOrderDesc = "DESC" ) +const ( + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusActive = "ACTIVE" + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusInactive = "INACTIVE" + // @enum TaskDefinitionFamilyStatus + TaskDefinitionFamilyStatusAll = "ALL" +) + const ( // @enum TaskDefinitionStatus TaskDefinitionStatusActive = "ACTIVE" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go index 260b39edc..fe99d735a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon EC2 Container Service (Amazon ECS) is a highly scalable, fast, container @@ -71,7 +71,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go index 0fb29b491..1245ef45d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/api.go @@ -15,7 +15,28 @@ import ( const opCreateFileSystem = "CreateFileSystem" -// CreateFileSystemRequest generates a request for the CreateFileSystem operation. +// CreateFileSystemRequest generates a "aws/request.Request" representing the +// client's request for the CreateFileSystem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateFileSystem method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateFileSystemRequest method. 
+// req, resp := client.CreateFileSystemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *request.Request, output *FileSystemDescription) { op := &request.Operation{ Name: opCreateFileSystem, @@ -39,30 +60,45 @@ func (c *EFS) CreateFileSystemRequest(input *CreateFileSystemInput) (req *reques // not currently exist that is owned by the caller's AWS account with the specified // creation token, this operation does the following: // -// Creates a new, empty file system. The file system will have an Amazon EFS -// assigned ID, and an initial lifecycle state "creating". Returns with the -// description of the created file system. Otherwise, this operation returns -// a FileSystemAlreadyExists error with the ID of the existing file system. +// Creates a new, empty file system. The file system will have an Amazon +// EFS assigned ID, and an initial lifecycle state creating. // -// For basic use cases, you can use a randomly generated UUID for the creation -// token. The idempotent operation allows you to retry a CreateFileSystem call -// without risk of creating an extra file system. This can happen when an initial -// call fails in a way that leaves it uncertain whether or not a file system -// was actually created. An example might be that a transport level timeout -// occurred or your connection was reset. As long as you use the same creation -// token, if the initial call had succeeded in creating a file system, the client -// can learn of its existence from the FileSystemAlreadyExists error. +// Returns with the description of the created file system. // -// The CreateFileSystem call returns while the file system's lifecycle state -// is still "creating". You can check the file system creation status by calling -// the DescribeFileSystems API, which among other things returns the file system -// state. After the file system is fully created, Amazon EFS sets its lifecycle -// state to "available", at which point you can create one or more mount targets -// for the file system (CreateMountTarget) in your VPC. You mount your Amazon -// EFS file system on an EC2 instances in your VPC via the mount target. For -// more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html) +// Otherwise, this operation returns a FileSystemAlreadyExists error with +// the ID of the existing file system. // -// This operation requires permission for the elasticfilesystem:CreateFileSystem +// For basic use cases, you can use a randomly generated UUID for the creation +// token. +// +// The idempotent operation allows you to retry a CreateFileSystem call without +// risk of creating an extra file system. This can happen when an initial call +// fails in a way that leaves it uncertain whether or not a file system was +// actually created. An example might be that a transport level timeout occurred +// or your connection was reset. As long as you use the same creation token, +// if the initial call had succeeded in creating a file system, the client can +// learn of its existence from the FileSystemAlreadyExists error. +// +// The CreateFileSystem call returns while the file system's lifecycle state +// is still creating. You can check the file system creation status by calling +// the DescribeFileSystems operation, which among other things returns the file +// system state. 
+// +// This operation also takes an optional PerformanceMode parameter that you +// choose for your file system. We recommend generalPurpose performance mode +// for most file systems. File systems using the maxIO performance mode can +// scale to higher levels of aggregate throughput and operations per second +// with a tradeoff of slightly higher latencies for most file operations. The +// performance mode can't be changed after the file system has been created. +// For more information, see Amazon EFS: Performance Modes (http://docs.aws.amazon.com/efs/latest/ug/performance.html#performancemodes.html). +// +// After the file system is fully created, Amazon EFS sets its lifecycle state +// to available, at which point you can create one or more mount targets for +// the file system in your VPC. For more information, see CreateMountTarget. +// You mount your Amazon EFS file system on an EC2 instances in your VPC via +// the mount target. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html). +// +// This operation requires permissions for the elasticfilesystem:CreateFileSystem // action. func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescription, error) { req, out := c.CreateFileSystemRequest(input) @@ -72,7 +108,28 @@ func (c *EFS) CreateFileSystem(input *CreateFileSystemInput) (*FileSystemDescrip const opCreateMountTarget = "CreateMountTarget" -// CreateMountTargetRequest generates a request for the CreateMountTarget operation. +// CreateMountTargetRequest generates a "aws/request.Request" representing the +// client's request for the CreateMountTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMountTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMountTargetRequest method. +// req, resp := client.CreateMountTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *request.Request, output *MountTargetDescription) { op := &request.Operation{ Name: opCreateMountTarget, @@ -101,15 +158,18 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // file system. For more information, see Amazon EFS: How it Works (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html). // // In the request, you also specify a file system ID for which you are creating -// the mount target and the file system's lifecycle state must be "available" -// (see DescribeFileSystems). +// the mount target and the file system's lifecycle state must be available. +// For more information, see DescribeFileSystems. 
// -// In the request, you also provide a subnet ID, which serves several purposes: +// In the request, you also provide a subnet ID, which determines the following: +// +// VPC in which Amazon EFS creates the mount target +// +// Availability Zone in which Amazon EFS creates the mount target +// +// IP address range from which Amazon EFS selects the IP address of the mount +// target (if you don't specify an IP address in the request) // -// It determines the VPC in which Amazon EFS creates the mount target. It -// determines the Availability Zone in which Amazon EFS creates the mount target. -// It determines the IP address range from which Amazon EFS selects the IP -// address of the mount target if you don't specify an IP address in the request. // After creating the mount target, Amazon EFS returns a response that includes, // a MountTargetId and an IpAddress. You use this IP address when mounting the // file system in an EC2 instance. You can also use the mount target's DNS name @@ -118,54 +178,71 @@ func (c *EFS) CreateMountTargetRequest(input *CreateMountTargetInput) (req *requ // IP address. For more information, see How it Works: Implementation Overview // (http://docs.aws.amazon.com/efs/latest/ug/how-it-works.html#how-it-works-implementation). // -// Note that you can create mount targets for a file system in only one VPC, +// Note that you can create mount targets for a file system in only one VPC, // and there can be only one mount target per Availability Zone. That is, if // the file system already has one or more mount targets created for it, the -// request to add another mount target must meet the following requirements: +// subnet specified in the request to add another mount target must meet the +// following requirements: // -// The subnet specified in the request must belong to the same VPC as the -// subnets of the existing mount targets. +// Must belong to the same VPC as the subnets of the existing mount targets // -// The subnet specified in the request must not be in the same Availability -// Zone as any of the subnets of the existing mount targets. If the request -// satisfies the requirements, Amazon EFS does the following: +// Must not be in the same Availability Zone as any of the subnets of the +// existing mount targets // -// Creates a new mount target in the specified subnet. Also creates a new -// network interface in the subnet as follows: If the request provides an IpAddress, -// Amazon EFS assigns that IP address to the network interface. Otherwise, Amazon -// EFS assigns a free address in the subnet (in the same way that the Amazon -// EC2 CreateNetworkInterface call does when a request does not specify a primary -// private IP address). If the request provides SecurityGroups, this network -// interface is associated with those security groups. Otherwise, it belongs -// to the default security group for the subnet's VPC. Assigns the description -// "Mount target fsmt-id for file system fs-id" where fsmt-id is the mount target -// ID, and fs-id is the FileSystemId. Sets the requesterManaged property of -// the network interface to "true", and the requesterId value to "EFS". Each -// Amazon EFS mount target has one corresponding requestor-managed EC2 network -// interface. After the network interface is created, Amazon EFS sets the NetworkInterfaceId -// field in the mount target's description to the network interface ID, and -// the IpAddress field to its address. If network interface creation fails, -// the entire CreateMountTarget operation fails. 
+// If the request satisfies the requirements, Amazon EFS does the following: // -// The CreateMountTarget call returns only after creating the network interface, -// but while the mount target state is still "creating". You can check the mount -// target creation status by calling the DescribeFileSystems API, which among -// other things returns the mount target state. We recommend you create a mount -// target in each of the Availability Zones. There are cost considerations for -// using a file system in an Availability Zone through a mount target created -// in another Availability Zone. For more information, go to Amazon EFS (http://aws.amazon.com/efs/) -// product detail page. In addition, by always using a mount target local to -// the instance's Availability Zone, you eliminate a partial failure scenario; -// if the Availability Zone in which your mount target is created goes down, -// then you won't be able to access your file system through that mount target. +// Creates a new mount target in the specified subnet. // -// This operation requires permission for the following action on the file +// Also creates a new network interface in the subnet as follows: +// +// If the request provides an IpAddress, Amazon EFS assigns that IP address +// to the network interface. Otherwise, Amazon EFS assigns a free address in +// the subnet (in the same way that the Amazon EC2 CreateNetworkInterface call +// does when a request does not specify a primary private IP address). +// +// If the request provides SecurityGroups, this network interface is associated +// with those security groups. Otherwise, it belongs to the default security +// group for the subnet's VPC. +// +// Assigns the description Mount target fsmt-id for file system fs-id where +// fsmt-id is the mount target ID, and fs-id is the FileSystemId. +// +// Sets the requesterManaged property of the network interface to true, and +// the requesterId value to EFS. +// +// Each Amazon EFS mount target has one corresponding requestor-managed EC2 +// network interface. After the network interface is created, Amazon EFS sets +// the NetworkInterfaceId field in the mount target's description to the network +// interface ID, and the IpAddress field to its address. If network interface +// creation fails, the entire CreateMountTarget operation fails. +// +// The CreateMountTarget call returns only after creating the network interface, +// but while the mount target state is still creating. You can check the mount +// target creation status by calling the DescribeFileSystems operation, which +// among other things returns the mount target state. +// +// We recommend you create a mount target in each of the Availability Zones. +// There are cost considerations for using a file system in an Availability +// Zone through a mount target created in another Availability Zone. For more +// information, see Amazon EFS (http://aws.amazon.com/efs/). In addition, by +// always using a mount target local to the instance's Availability Zone, you +// eliminate a partial failure scenario. If the Availability Zone in which your +// mount target is created goes down, then you won't be able to access your +// file system through that mount target. 
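A minimal CreateMountTarget call that follows the subnet rules above might look like the sketch below. The file system, subnet, and security group IDs are placeholders, and the file system is assumed to already be in the available state.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	client := efs.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// All identifiers below are placeholders. The subnet determines the VPC,
	// the Availability Zone, and (when IpAddress is omitted) the range from
	// which Amazon EFS picks the mount target's IP address.
	mt, err := client.CreateMountTarget(&efs.CreateMountTargetInput{
		FileSystemId:   aws.String("fs-01234567"),
		SubnetId:       aws.String("subnet-0123abcd"),
		SecurityGroups: []*string{aws.String("sg-0123abcd")}, // up to five, same VPC as the subnet
	})
	if err != nil {
		log.Fatalf("CreateMountTarget failed: %v", err)
	}

	// The call returns while the mount target is still "creating"; the
	// IpAddress is what EC2 instances in the same VPC mount against.
	fmt.Println("mount target:", aws.StringValue(mt.MountTargetId),
		"ip:", aws.StringValue(mt.IpAddress),
		"state:", aws.StringValue(mt.LifeCycleState))
}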
+// +// This operation requires permissions for the following action on the file // system: // -// elasticfilesystem:CreateMountTarget This operation also requires permission -// for the following Amazon EC2 actions: +// elasticfilesystem:CreateMountTarget // -// ec2:DescribeSubnets ec2:DescribeNetworkInterfaces ec2:CreateNetworkInterface +// This operation also requires permissions for the following Amazon EC2 +// actions: +// +// ec2:DescribeSubnets +// +// ec2:DescribeNetworkInterfaces +// +// ec2:CreateNetworkInterface func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDescription, error) { req, out := c.CreateMountTargetRequest(input) err := req.Send() @@ -174,7 +251,28 @@ func (c *EFS) CreateMountTarget(input *CreateMountTargetInput) (*MountTargetDesc const opCreateTags = "CreateTags" -// CreateTagsRequest generates a request for the CreateTags operation. +// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { op := &request.Operation{ Name: opCreateTags, @@ -197,8 +295,8 @@ func (c *EFS) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, o // Creates or overwrites tags associated with a file system. Each tag is a key-value // pair. If a tag key specified in the request already exists on the file system, // this operation overwrites its value with the value provided in the request. -// If you add the "Name" tag to your file system, Amazon EFS returns it in the -// response to the DescribeFileSystems API. +// If you add the Name tag to your file system, Amazon EFS returns it in the +// response to the DescribeFileSystems operation. // // This operation requires permission for the elasticfilesystem:CreateTags // action. @@ -210,7 +308,28 @@ func (c *EFS) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) { const opDeleteFileSystem = "DeleteFileSystem" -// DeleteFileSystemRequest generates a request for the DeleteFileSystem operation. +// DeleteFileSystemRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFileSystem operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteFileSystem method directly +// instead. 
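The CreateTags behavior described above (overwriting existing keys, and the special handling of the Name tag) can be driven with a call like the following sketch; the file system ID and tag values are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	client := efs.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// The file system ID is a placeholder. Adding a Name tag makes
	// DescribeFileSystems return it in the Name field of the description;
	// re-sending an existing key overwrites its value rather than failing.
	_, err := client.CreateTags(&efs.CreateTagsInput{
		FileSystemId: aws.String("fs-01234567"),
		Tags: []*efs.Tag{
			{Key: aws.String("Name"), Value: aws.String("shared-assets")},
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	if err != nil {
		log.Fatalf("CreateTags failed: %v", err)
	}
}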
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFileSystemRequest method. +// req, resp := client.DeleteFileSystemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *request.Request, output *DeleteFileSystemOutput) { op := &request.Operation{ Name: opDeleteFileSystem, @@ -231,19 +350,21 @@ func (c *EFS) DeleteFileSystemRequest(input *DeleteFileSystemInput) (req *reques } // Deletes a file system, permanently severing access to its contents. Upon -// return, the file system no longer exists and you will not be able to access -// any contents of the deleted file system. +// return, the file system no longer exists and you can't access any contents +// of the deleted file system. // -// You cannot delete a file system that is in use. That is, if the file system +// You can't delete a file system that is in use. That is, if the file system // has any mount targets, you must first delete them. For more information, // see DescribeMountTargets and DeleteMountTarget. // -// The DeleteFileSystem call returns while the file system state is still "deleting". -// You can check the file system deletion status by calling the DescribeFileSystems -// API, which returns a list of file systems in your account. If you pass file -// system ID or creation token for the deleted file system, the DescribeFileSystems -// will return a 404 "FileSystemNotFound" error. This operation requires permission -// for the elasticfilesystem:DeleteFileSystem action. +// The DeleteFileSystem call returns while the file system state is still +// deleting. You can check the file system deletion status by calling the DescribeFileSystems +// operation, which returns a list of file systems in your account. If you pass +// file system ID or creation token for the deleted file system, the DescribeFileSystems +// returns a 404 FileSystemNotFound error. +// +// This operation requires permissions for the elasticfilesystem:DeleteFileSystem +// action. func (c *EFS) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemOutput, error) { req, out := c.DeleteFileSystemRequest(input) err := req.Send() @@ -252,7 +373,28 @@ func (c *EFS) DeleteFileSystem(input *DeleteFileSystemInput) (*DeleteFileSystemO const opDeleteMountTarget = "DeleteMountTarget" -// DeleteMountTargetRequest generates a request for the DeleteMountTarget operation. +// DeleteMountTargetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMountTarget operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMountTarget method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMountTargetRequest method. 
+// req, resp := client.DeleteMountTargetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *request.Request, output *DeleteMountTargetOutput) { op := &request.Operation{ Name: opDeleteMountTarget, @@ -274,26 +416,29 @@ func (c *EFS) DeleteMountTargetRequest(input *DeleteMountTargetInput) (req *requ // Deletes the specified mount target. // -// This operation forcibly breaks any mounts of the file system via the mount -// target being deleted, which might disrupt instances or applications using -// those mounts. To avoid applications getting cut off abruptly, you might consider -// unmounting any mounts of the mount target, if feasible. The operation also -// deletes the associated network interface. Uncommitted writes may be lost, -// but breaking a mount target using this operation does not corrupt the file -// system itself. The file system you created remains. You can mount an EC2 -// instance in your VPC using another mount target. +// This operation forcibly breaks any mounts of the file system via the mount +// target that is being deleted, which might disrupt instances or applications +// using those mounts. To avoid applications getting cut off abruptly, you might +// consider unmounting any mounts of the mount target, if feasible. The operation +// also deletes the associated network interface. Uncommitted writes may be +// lost, but breaking a mount target using this operation does not corrupt the +// file system itself. The file system you created remains. You can mount an +// EC2 instance in your VPC via another mount target. // -// This operation requires permission for the following action on the file +// This operation requires permissions for the following action on the file // system: // -// elasticfilesystem:DeleteMountTarget The DeleteMountTarget call returns -// while the mount target state is still "deleting". You can check the mount -// target deletion by calling the DescribeMountTargets API, which returns a -// list of mount target descriptions for the given file system. The operation -// also requires permission for the following Amazon EC2 action on the mount -// target's network interface: +// elasticfilesystem:DeleteMountTarget // -// ec2:DeleteNetworkInterface +// The DeleteMountTarget call returns while the mount target state is still +// deleting. You can check the mount target deletion by calling the DescribeMountTargets +// operation, which returns a list of mount target descriptions for the given +// file system. +// +// The operation also requires permissions for the following Amazon EC2 action +// on the mount target's network interface: +// +// ec2:DeleteNetworkInterface func (c *EFS) DeleteMountTarget(input *DeleteMountTargetInput) (*DeleteMountTargetOutput, error) { req, out := c.DeleteMountTargetRequest(input) err := req.Send() @@ -302,7 +447,28 @@ func (c *EFS) DeleteMountTarget(input *DeleteMountTargetInput) (*DeleteMountTarg const opDeleteTags = "DeleteTags" -// DeleteTagsRequest generates a request for the DeleteTags operation. +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
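Because a file system with mount targets counts as in use, teardown follows the order described above for DeleteMountTarget and DeleteFileSystem: remove the mount targets first, then the file system. The sketch below assumes a placeholder file system ID and omits the polling a production script would add.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	client := efs.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))
	fsID := aws.String("fs-01234567") // placeholder

	// Mount targets first: DeleteMountTarget forcibly breaks any mounts via
	// that target, so anything still using it should be unmounted beforehand.
	mts, err := client.DescribeMountTargets(&efs.DescribeMountTargetsInput{FileSystemId: fsID})
	if err != nil {
		log.Fatalf("DescribeMountTargets failed: %v", err)
	}
	for _, mt := range mts.MountTargets {
		if _, err := client.DeleteMountTarget(&efs.DeleteMountTargetInput{
			MountTargetId: mt.MountTargetId,
		}); err != nil {
			log.Fatalf("DeleteMountTarget failed: %v", err)
		}
	}

	// Mount target deletion is asynchronous (the state passes through
	// "deleting"), so a robust script would poll DescribeMountTargets until
	// none remain; DeleteFileSystem fails while any still exist.
	if _, err := client.DeleteFileSystem(&efs.DeleteFileSystemInput{FileSystemId: fsID}); err != nil {
		log.Fatalf("DeleteFileSystem failed: %v", err)
	}
}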
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { op := &request.Operation{ Name: opDeleteTags, @@ -323,12 +489,12 @@ func (c *EFS) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, o } // Deletes the specified tags from a file system. If the DeleteTags request -// includes a tag key that does not exist, Amazon EFS ignores it; it is not -// an error. For more information about tags and related restrictions, go to -// Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) +// includes a tag key that does not exist, Amazon EFS ignores it and doesn't +// cause an error. For more information about tags and related restrictions, +// see Tag Restrictions (http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html) // in the AWS Billing and Cost Management User Guide. // -// This operation requires permission for the elasticfilesystem:DeleteTags +// This operation requires permissions for the elasticfilesystem:DeleteTags // action. func (c *EFS) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { req, out := c.DeleteTagsRequest(input) @@ -338,7 +504,28 @@ func (c *EFS) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) { const opDescribeFileSystems = "DescribeFileSystems" -// DescribeFileSystemsRequest generates a request for the DescribeFileSystems operation. +// DescribeFileSystemsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeFileSystems operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeFileSystems method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeFileSystemsRequest method. 
+// req, resp := client.DescribeFileSystemsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req *request.Request, output *DescribeFileSystemsOutput) { op := &request.Operation{ Name: opDescribeFileSystems, @@ -357,9 +544,9 @@ func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req * } // Returns the description of a specific Amazon EFS file system if either the -// file system CreationToken or the FileSystemId is provided; otherwise, returns -// descriptions of all file systems owned by the caller's AWS account in the -// AWS region of the endpoint that you're calling. +// file system CreationToken or the FileSystemId is provided. Otherwise, it +// returns descriptions of all file systems owned by the caller's AWS account +// in the AWS Region of the endpoint that you're calling. // // When retrieving all file system descriptions, you can optionally specify // the MaxItems parameter to limit the number of descriptions in a response. @@ -367,20 +554,20 @@ func (c *EFS) DescribeFileSystemsRequest(input *DescribeFileSystemsInput) (req * // an opaque token, in the response. In this case, you should send a subsequent // request with the Marker request parameter set to the value of NextMarker. // -// So to retrieve a list of your file system descriptions, the expected usage -// of this API is an iterative process of first calling DescribeFileSystems -// without the Marker and then continuing to call it with the Marker parameter +// To retrieve a list of your file system descriptions, this operation is used +// in an iterative process, where DescribeFileSystems is called first without +// the Marker and then the operation continues to call it with the Marker parameter // set to the value of the NextMarker from the previous response until the response // has no NextMarker. // -// Note that the implementation may return fewer than MaxItems file system -// descriptions while still including a NextMarker value. +// The implementation may return fewer than MaxItems file system descriptions +// while still including a NextMarker value. // // The order of file systems returned in the response of one DescribeFileSystems -// call, and the order of file systems returned across the responses of a multi-call -// iteration, is unspecified. +// call and the order of file systems returned across the responses of a multi-call +// iteration is unspecified. // -// This operation requires permission for the elasticfilesystem:DescribeFileSystems +// This operation requires permissions for the elasticfilesystem:DescribeFileSystems // action. func (c *EFS) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFileSystemsOutput, error) { req, out := c.DescribeFileSystemsRequest(input) @@ -390,7 +577,28 @@ func (c *EFS) DescribeFileSystems(input *DescribeFileSystemsInput) (*DescribeFil const opDescribeMountTargetSecurityGroups = "DescribeMountTargetSecurityGroups" -// DescribeMountTargetSecurityGroupsRequest generates a request for the DescribeMountTargetSecurityGroups operation. +// DescribeMountTargetSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMountTargetSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
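The Marker/NextMarker iteration described above for DescribeFileSystems translates into a simple loop. The sketch below assumes a placeholder region and prints only a few fields from each description.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	client := efs.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// First call carries no Marker; each later call passes the NextMarker from
	// the previous response until no NextMarker comes back. MaxItems only caps
	// the page size, and a page may hold fewer items than requested.
	input := &efs.DescribeFileSystemsInput{MaxItems: aws.Int64(10)}
	for {
		page, err := client.DescribeFileSystems(input)
		if err != nil {
			log.Fatalf("DescribeFileSystems failed: %v", err)
		}
		for _, fs := range page.FileSystems {
			fmt.Printf("%s\t%s\t%s\n",
				aws.StringValue(fs.FileSystemId),
				aws.StringValue(fs.LifeCycleState),
				aws.StringValue(fs.Name))
		}
		if page.NextMarker == nil {
			break
		}
		input.Marker = page.NextMarker
	}
}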
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMountTargetSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMountTargetSecurityGroupsRequest method. +// req, resp := client.DescribeMountTargetSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DescribeMountTargetSecurityGroupsRequest(input *DescribeMountTargetSecurityGroupsInput) (req *request.Request, output *DescribeMountTargetSecurityGroupsOutput) { op := &request.Operation{ Name: opDescribeMountTargetSecurityGroups, @@ -410,13 +618,15 @@ func (c *EFS) DescribeMountTargetSecurityGroupsRequest(input *DescribeMountTarge // Returns the security groups currently in effect for a mount target. This // operation requires that the network interface of the mount target has been -// created and the life cycle state of the mount target is not "deleted". +// created and the lifecycle state of the mount target is not deleted. // // This operation requires permissions for the following actions: // -// elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount -// target's file system. ec2:DescribeNetworkInterfaceAttribute action on the -// mount target's network interface. +// elasticfilesystem:DescribeMountTargetSecurityGroups action on the mount +// target's file system. +// +// ec2:DescribeNetworkInterfaceAttribute action on the mount target's network +// interface. func (c *EFS) DescribeMountTargetSecurityGroups(input *DescribeMountTargetSecurityGroupsInput) (*DescribeMountTargetSecurityGroupsOutput, error) { req, out := c.DescribeMountTargetSecurityGroupsRequest(input) err := req.Send() @@ -425,7 +635,28 @@ func (c *EFS) DescribeMountTargetSecurityGroups(input *DescribeMountTargetSecuri const opDescribeMountTargets = "DescribeMountTargets" -// DescribeMountTargetsRequest generates a request for the DescribeMountTargets operation. +// DescribeMountTargetsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMountTargets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMountTargets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMountTargetsRequest method. 
+// req, resp := client.DescribeMountTargetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DescribeMountTargetsRequest(input *DescribeMountTargetsInput) (req *request.Request, output *DescribeMountTargetsOutput) { op := &request.Operation{ Name: opDescribeMountTargets, @@ -447,8 +678,8 @@ func (c *EFS) DescribeMountTargetsRequest(input *DescribeMountTargetsInput) (req // mount target, for a file system. When requesting all of the current mount // targets, the order of mount targets returned in the response is unspecified. // -// This operation requires permission for the elasticfilesystem:DescribeMountTargets -// action, on either the file system id that you specify in FileSystemId, or +// This operation requires permissions for the elasticfilesystem:DescribeMountTargets +// action, on either the file system ID that you specify in FileSystemId, or // on the file system of the mount target that you specify in MountTargetId. func (c *EFS) DescribeMountTargets(input *DescribeMountTargetsInput) (*DescribeMountTargetsOutput, error) { req, out := c.DescribeMountTargetsRequest(input) @@ -458,7 +689,28 @@ func (c *EFS) DescribeMountTargets(input *DescribeMountTargetsInput) (*DescribeM const opDescribeTags = "DescribeTags" -// DescribeTagsRequest generates a request for the DescribeTags operation. +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { op := &request.Operation{ Name: opDescribeTags, @@ -477,11 +729,10 @@ func (c *EFS) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Reques } // Returns the tags associated with a file system. The order of tags returned -// in the response of one DescribeTags call, and the order of tags returned -// across the responses of a multi-call iteration (when using pagination), is -// unspecified. +// in the response of one DescribeTags call and the order of tags returned across +// the responses of a multi-call iteration (when using pagination) is unspecified. // -// This operation requires permission for the elasticfilesystem:DescribeTags +// This operation requires permissions for the elasticfilesystem:DescribeTags // action. 
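Listing the mount targets and tags of a single file system, as described above for DescribeMountTargets and DescribeTags, might look like the following sketch; the file system ID is a placeholder and the result ordering is unspecified.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/efs"
)

func main() {
	client := efs.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))
	fsID := aws.String("fs-01234567") // placeholder

	// Mount targets for one file system; the order of results is unspecified.
	mts, err := client.DescribeMountTargets(&efs.DescribeMountTargetsInput{FileSystemId: fsID})
	if err != nil {
		log.Fatalf("DescribeMountTargets failed: %v", err)
	}
	for _, mt := range mts.MountTargets {
		fmt.Printf("%s\t%s\t%s\n",
			aws.StringValue(mt.MountTargetId),
			aws.StringValue(mt.SubnetId),
			aws.StringValue(mt.IpAddress))
	}

	// Tags on the same file system; Marker and MaxItems paginate the same way
	// DescribeFileSystems does when the tag set is large.
	tags, err := client.DescribeTags(&efs.DescribeTagsInput{FileSystemId: fsID})
	if err != nil {
		log.Fatalf("DescribeTags failed: %v", err)
	}
	for _, t := range tags.Tags {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}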
func (c *EFS) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error) { req, out := c.DescribeTagsRequest(input) @@ -491,7 +742,28 @@ func (c *EFS) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error const opModifyMountTargetSecurityGroups = "ModifyMountTargetSecurityGroups" -// ModifyMountTargetSecurityGroupsRequest generates a request for the ModifyMountTargetSecurityGroups operation. +// ModifyMountTargetSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyMountTargetSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyMountTargetSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyMountTargetSecurityGroupsRequest method. +// req, resp := client.ModifyMountTargetSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSecurityGroupsInput) (req *request.Request, output *ModifyMountTargetSecurityGroupsOutput) { op := &request.Operation{ Name: opModifyMountTargetSecurityGroups, @@ -513,18 +785,20 @@ func (c *EFS) ModifyMountTargetSecurityGroupsRequest(input *ModifyMountTargetSec // Modifies the set of security groups in effect for a mount target. // -// When you create a mount target, Amazon EFS also creates a new network interface -// (see CreateMountTarget). This operation replaces the security groups in effect -// for the network interface associated with a mount target, with the SecurityGroups -// provided in the request. This operation requires that the network interface -// of the mount target has been created and the life cycle state of the mount -// target is not "deleted". +// When you create a mount target, Amazon EFS also creates a new network interface. +// For more information, see CreateMountTarget. This operation replaces the +// security groups in effect for the network interface associated with a mount +// target, with the SecurityGroups provided in the request. This operation requires +// that the network interface of the mount target has been created and the lifecycle +// state of the mount target is not deleted. // // The operation requires permissions for the following actions: // -// elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount -// target's file system. ec2:ModifyNetworkInterfaceAttribute action on the -// mount target's network interface. +// elasticfilesystem:ModifyMountTargetSecurityGroups action on the mount +// target's file system. +// +// ec2:ModifyNetworkInterfaceAttribute action on the mount target's network +// interface. func (c *EFS) ModifyMountTargetSecurityGroups(input *ModifyMountTargetSecurityGroupsInput) (*ModifyMountTargetSecurityGroupsOutput, error) { req, out := c.ModifyMountTargetSecurityGroupsRequest(input) err := req.Send() @@ -537,6 +811,13 @@ type CreateFileSystemInput struct { // String of up to 64 ASCII characters. 
Amazon EFS uses this to ensure idempotent // creation. CreationToken *string `min:"1" type:"string" required:"true"` + + // The PerformanceMode of the file system. We recommend generalPurpose performance + // mode for most file systems. File systems using the maxIO performance mode + // can scale to higher levels of aggregate throughput and operations per second + // with a tradeoff of slightly higher latencies for most file operations. This + // can't be changed after the file system has been created. + PerformanceMode *string `type:"string" enum:"PerformanceMode"` } // String returns the string representation @@ -568,17 +849,17 @@ func (s *CreateFileSystemInput) Validate() error { type CreateMountTargetInput struct { _ struct{} `type:"structure"` - // The ID of the file system for which to create the mount target. + // ID of the file system for which to create the mount target. FileSystemId *string `type:"string" required:"true"` - // A valid IPv4 address within the address range of the specified subnet. + // Valid IPv4 address within the address range of the specified subnet. IpAddress *string `type:"string"` - // Up to 5 VPC security group IDs, of the form "sg-xxxxxxxx". These must be + // Up to five VPC security group IDs, of the form sg-xxxxxxxx. These must be // for the same VPC as subnet specified. SecurityGroups []*string `type:"list"` - // The ID of the subnet to add the mount target in. + // ID of the subnet to add the mount target in. SubnetId *string `type:"string" required:"true"` } @@ -611,11 +892,11 @@ func (s *CreateMountTargetInput) Validate() error { type CreateTagsInput struct { _ struct{} `type:"structure"` - // String. The ID of the file system whose tags you want to modify. This operation - // modifies only the tags and not the file system. + // ID of the file system whose tags you want to modify (String). This operation + // modifies the tags only, not the file system. FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` - // An array of Tag objects to add. Each Tag object is a key-value pair. + // Array of Tag objects to add. Each Tag object is a key-value pair. Tags []*Tag `type:"list" required:"true"` } @@ -672,7 +953,7 @@ func (s CreateTagsOutput) GoString() string { type DeleteFileSystemInput struct { _ struct{} `type:"structure"` - // The ID of the file system you want to delete. + // ID of the file system you want to delete. FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` } @@ -716,7 +997,7 @@ func (s DeleteFileSystemOutput) GoString() string { type DeleteMountTargetInput struct { _ struct{} `type:"structure"` - // String. The ID of the mount target to delete. + // ID of the mount target to delete (String). MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` } @@ -760,10 +1041,10 @@ func (s DeleteMountTargetOutput) GoString() string { type DeleteTagsInput struct { _ struct{} `type:"structure"` - // String. The ID of the file system whose tags you want to delete. + // ID of the file system whose tags you want to delete (String). FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` - // A list of tag keys to delete. + // List of tag keys to delete. TagKeys []*string `type:"list" required:"true"` } @@ -810,22 +1091,22 @@ func (s DeleteTagsOutput) GoString() string { type DescribeFileSystemsInput struct { _ struct{} `type:"structure"` - // Optional string. 
Restricts the list to the file system with this creation - // token (you specify a creation token at the time of creating an Amazon EFS - // file system). + // (Optional) Restricts the list to the file system with this creation token + // (String). You specify a creation token when you create an Amazon EFS file + // system. CreationToken *string `location:"querystring" locationName:"CreationToken" min:"1" type:"string"` - // Optional string. File system ID whose description you want to retrieve. + // (Optional) ID of the file system whose description you want to retrieve (String). FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"` - // Optional string. Opaque pagination token returned from a previous DescribeFileSystems - // operation. If present, specifies to continue the list from where the returning - // call had left off. + // (Optional) Opaque pagination token returned from a previous DescribeFileSystems + // operation (String). If present, specifies to continue the list from where + // the returning call had left off. Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // Optional integer. Specifies the maximum number of file systems to return - // in the response. This parameter value must be greater than 0. The number - // of items Amazon EFS returns will be the minimum of the MaxItems parameter + // (Optional) Specifies the maximum number of file systems to return in the + // response (integer). This parameter value must be greater than 0. The number + // of items that Amazon EFS returns is the minimum of the MaxItems parameter // specified in the request and the service's internal maximum number of items // per page. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` @@ -860,13 +1141,13 @@ func (s *DescribeFileSystemsInput) Validate() error { type DescribeFileSystemsOutput struct { _ struct{} `type:"structure"` - // An array of file system descriptions. + // Array of file system descriptions. FileSystems []*FileSystemDescription `type:"list"` - // A string, present if provided by caller in the request. + // Present if provided by caller in the request (String). Marker *string `type:"string"` - // A string, present if there are more file systems than returned in the response. + // Present if there are more file systems than returned in the response (String). // You can use the NextMarker in the subsequent request to fetch the descriptions. NextMarker *string `type:"string"` } @@ -884,7 +1165,7 @@ func (s DescribeFileSystemsOutput) GoString() string { type DescribeMountTargetSecurityGroupsInput struct { _ struct{} `type:"structure"` - // The ID of the mount target whose security groups you want to retrieve. + // ID of the mount target whose security groups you want to retrieve. MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` } @@ -914,7 +1195,7 @@ func (s *DescribeMountTargetSecurityGroupsInput) Validate() error { type DescribeMountTargetSecurityGroupsOutput struct { _ struct{} `type:"structure"` - // An array of security groups. + // Array of security groups. SecurityGroups []*string `type:"list" required:"true"` } @@ -931,20 +1212,20 @@ func (s DescribeMountTargetSecurityGroupsOutput) GoString() string { type DescribeMountTargetsInput struct { _ struct{} `type:"structure"` - // Optional. String. The ID of the file system whose mount targets you want - // to list. It must be included in your request if MountTargetId is not included. 
+ // (Optional) ID of the file system whose mount targets you want to list (String). + // It must be included in your request if MountTargetId is not included. FileSystemId *string `location:"querystring" locationName:"FileSystemId" type:"string"` - // Optional. String. Opaque pagination token returned from a previous DescribeMountTargets - // operation. If present, it specifies to continue the list from where the previous - // returning call left off. + // (Optional) Opaque pagination token returned from a previous DescribeMountTargets + // operation (String). If present, it specifies to continue the list from where + // the previous returning call left off. Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // Optional. Maximum number of mount targets to return in the response. It must - // be an integer with a value greater than zero. + // (Optional) Maximum number of mount targets to return in the response. It + // must be an integer with a value greater than zero. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` - // Optional. String. The ID of the mount target that you want to have described. + // (Optional) ID of the mount target that you want to have described (String). // It must be included in your request if FileSystemId is not included. MountTargetId *string `location:"querystring" locationName:"MountTargetId" type:"string"` } @@ -1002,16 +1283,16 @@ func (s DescribeMountTargetsOutput) GoString() string { type DescribeTagsInput struct { _ struct{} `type:"structure"` - // The ID of the file system whose tag set you want to retrieve. + // ID of the file system whose tag set you want to retrieve. FileSystemId *string `location:"uri" locationName:"FileSystemId" type:"string" required:"true"` - // Optional. String. Opaque pagination token returned from a previous DescribeTags - // operation. If present, it specifies to continue the list from where the previous - // call left off. + // (Optional) Opaque pagination token returned from a previous DescribeTags + // operation (String). If present, it specifies to continue the list from where + // the previous call left off. Marker *string `location:"querystring" locationName:"Marker" type:"string"` - // Optional. Maximum number of file system tags to return in the response. It - // must be an integer with a value greater than zero. + // (Optional) Maximum number of file system tags to return in the response. + // It must be an integer with a value greater than zero. MaxItems *int64 `location:"querystring" locationName:"MaxItems" min:"1" type:"integer"` } @@ -1067,45 +1348,47 @@ func (s DescribeTagsOutput) GoString() string { return s.String() } -// This object provides description of a file system. +// Description of the file system. type FileSystemDescription struct { _ struct{} `type:"structure"` - // The time at which the file system was created, in seconds, since 1970-01-01T00:00:00Z. + // Time that the file system was created, in seconds (since 1970-01-01T00:00:00Z). CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` // Opaque string specified in the request. CreationToken *string `min:"1" type:"string" required:"true"` - // The file system ID assigned by Amazon EFS. + // ID of the file system, assigned by Amazon EFS. FileSystemId *string `type:"string" required:"true"` - // A predefined string value that indicates the lifecycle phase of the file - // system. + // Lifecycle phase of the file system. 
LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` - // You can add tags to a file system (see CreateTags) including a "Name" tag. - // If the file system has a "Name" tag, Amazon EFS returns the value in this - // field. + // You can add tags to a file system, including a Name tag. For more information, + // see CreateTags. If the file system has a Name tag, Amazon EFS returns the + // value in this field. Name *string `type:"string"` - // The current number of mount targets (see CreateMountTarget) the file system - // has. + // Current number of mount targets that the file system has. For more information, + // see CreateMountTarget. NumberOfMountTargets *int64 `type:"integer" required:"true"` - // The AWS account that created the file system. If the file system was created + // AWS account that created the file system. If the file system was created // by an IAM user, the parent account to which the user belongs is the owner. OwnerId *string `type:"string" required:"true"` - // This object provides the latest known metered size of data stored in the - // file system, in bytes, in its Value field, and the time at which that size - // was determined in its Timestamp field. The Timestamp value is the integer - // number of seconds since 1970-01-01T00:00:00Z. Note that the value does not - // represent the size of a consistent snapshot of the file system, but it is - // eventually consistent when there are no writes to the file system. That is, - // the value will represent actual size only if the file system is not modified - // for a period longer than a couple of hours. Otherwise, the value is not the - // exact size the file system was at any instant in time. + // The PerformanceMode of the file system. + PerformanceMode *string `type:"string" required:"true" enum:"PerformanceMode"` + + // Latest known metered size (in bytes) of data stored in the file system, in + // bytes, in its Value field, and the time at which that size was determined + // in its Timestamp field. The Timestamp value is the integer number of seconds + // since 1970-01-01T00:00:00Z. Note that the value does not represent the size + // of a consistent snapshot of the file system, but it is eventually consistent + // when there are no writes to the file system. That is, the value will represent + // actual size only if the file system is not modified for a period longer than + // a couple of hours. Otherwise, the value is not the exact size the file system + // was at any instant in time. SizeInBytes *FileSystemSize `type:"structure" required:"true"` } @@ -1119,22 +1402,22 @@ func (s FileSystemDescription) GoString() string { return s.String() } -// This object provides the latest known metered size, in bytes, of data stored -// in the file system, in its Value field, and the time at which that size was -// determined in its Timestamp field. Note that the value does not represent -// the size of a consistent snapshot of the file system, but it is eventually -// consistent when there are no writes to the file system. That is, the value -// will represent the actual size only if the file system is not modified for -// a period longer than a couple of hours. Otherwise, the value is not necessarily -// the exact size the file system was at any instant in time. +// Latest known metered size (in bytes) of data stored in the file system, in +// its Value field, and the time at which that size was determined in its Timestamp +// field. 
Note that the value does not represent the size of a consistent snapshot +// of the file system, but it is eventually consistent when there are no writes +// to the file system. That is, the value will represent the actual size only +// if the file system is not modified for a period longer than a couple of hours. +// Otherwise, the value is not necessarily the exact size the file system was +// at any instant in time. type FileSystemSize struct { _ struct{} `type:"structure"` - // The time at which the size of data, returned in the Value field, was determined. + // Time at which the size of data, returned in the Value field, was determined. // The value is the integer number of seconds since 1970-01-01T00:00:00Z. Timestamp *time.Time `type:"timestamp" timestampFormat:"unix"` - // The latest known metered size, in bytes, of data stored in the file system. + // Latest known metered size (in bytes) of data stored in the file system. Value *int64 `type:"long" required:"true"` } @@ -1151,10 +1434,10 @@ func (s FileSystemSize) GoString() string { type ModifyMountTargetSecurityGroupsInput struct { _ struct{} `type:"structure"` - // The ID of the mount target whose security groups you want to modify. + // ID of the mount target whose security groups you want to modify. MountTargetId *string `location:"uri" locationName:"MountTargetId" type:"string" required:"true"` - // An array of up to five VPC security group IDs. + // Array of up to five VPC security group IDs. SecurityGroups []*string `type:"list"` } @@ -1195,30 +1478,30 @@ func (s ModifyMountTargetSecurityGroupsOutput) GoString() string { return s.String() } -// This object provides description of a mount target. +// Provides a description of a mount target. type MountTargetDescription struct { _ struct{} `type:"structure"` - // The ID of the file system for which the mount target is intended. + // ID of the file system for which the mount target is intended. FileSystemId *string `type:"string" required:"true"` - // The address at which the file system may be mounted via the mount target. + // Address at which the file system may be mounted via the mount target. IpAddress *string `type:"string"` - // The lifecycle state the mount target is in. + // Lifecycle state of the mount target. LifeCycleState *string `type:"string" required:"true" enum:"LifeCycleState"` - // The system-assigned mount target ID. + // System-assigned mount target ID. MountTargetId *string `type:"string" required:"true"` - // The ID of the network interface that Amazon EFS created when it created the - // mount target. + // ID of the network interface that Amazon EFS created when it created the mount + // target. NetworkInterfaceId *string `type:"string"` - // The AWS account ID that owns the resource. + // AWS account ID that owns the resource. OwnerId *string `type:"string"` - // The ID of the subnet that the mount target is in. + // ID of the mount target's subnet. SubnetId *string `type:"string" required:"true"` } @@ -1232,13 +1515,12 @@ func (s MountTargetDescription) GoString() string { return s.String() } -// A tag is a pair of key and value. The allowed characters in keys and values -// are letters, whitespace, and numbers, representable in UTF-8, and the characters -// '+', '-', '=', '.', '_', ':', and '/'. +// A tag is a key-value pair. Allowed characters: letters, whitespace, and numbers, +// representable in UTF-8, and the following characters: + - = . _ : / type Tag struct { _ struct{} `type:"structure"` - // Tag key, a string. 
The key must not start with "aws:". + // Tag key (String). The key can't start with aws:. Key *string `min:"1" type:"string" required:"true"` // Value of the tag key. @@ -1284,3 +1566,10 @@ const ( // @enum LifeCycleState LifeCycleStateDeleted = "deleted" ) + +const ( + // @enum PerformanceMode + PerformanceModeGeneralPurpose = "generalPurpose" + // @enum PerformanceMode + PerformanceModeMaxIo = "maxIO" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go index cf1ecde58..de3a48ac3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/efs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/efs/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) //The service client's operations are safe to be used concurrently. @@ -57,7 +57,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go index 3409de9d6..f1d9a70f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/api.go @@ -14,7 +14,28 @@ import ( const opAddTagsToResource = "AddTagsToResource" -// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *TagListMessage) { op := &request.Operation{ Name: opAddTagsToResource, @@ -42,7 +63,8 @@ func (c *ElastiCache) AddTagsToResourceRequest(input *AddTagsToResourceInput) (r // costs aggregated by your tags. You can apply tags that represent business // categories (such as cost centers, application names, or owners) to organize // your costs across multiple services. For more information, see Using Cost -// Allocation Tags in Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html). 
+// Allocation Tags in Amazon ElastiCache (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Tagging.html) +// in the ElastiCache User Guide. func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagListMessage, error) { req, out := c.AddTagsToResourceRequest(input) err := req.Send() @@ -51,7 +73,28 @@ func (c *ElastiCache) AddTagsToResource(input *AddTagsToResourceInput) (*TagList const opAuthorizeCacheSecurityGroupIngress = "AuthorizeCacheSecurityGroupIngress" -// AuthorizeCacheSecurityGroupIngressRequest generates a request for the AuthorizeCacheSecurityGroupIngress operation. +// AuthorizeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeCacheSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeCacheSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeCacheSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeCacheSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *AuthorizeCacheSecurityGroupIngressInput) (req *request.Request, output *AuthorizeCacheSecurityGroupIngressOutput) { op := &request.Operation{ Name: opAuthorizeCacheSecurityGroupIngress, @@ -73,7 +116,7 @@ func (c *ElastiCache) AuthorizeCacheSecurityGroupIngressRequest(input *Authorize // cache security group. Applications using ElastiCache must be running on Amazon // EC2, and Amazon EC2 security groups are used as the authorization mechanism. // -// You cannot authorize ingress from an Amazon EC2 security group in one region +// You cannot authorize ingress from an Amazon EC2 security group in one region // to an ElastiCache cluster in another region. func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSecurityGroupIngressInput) (*AuthorizeCacheSecurityGroupIngressOutput, error) { req, out := c.AuthorizeCacheSecurityGroupIngressRequest(input) @@ -83,7 +126,28 @@ func (c *ElastiCache) AuthorizeCacheSecurityGroupIngress(input *AuthorizeCacheSe const opCopySnapshot = "CopySnapshot" -// CopySnapshotRequest generates a request for the CopySnapshot operation. +// CopySnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopySnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopySnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
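The cost-allocation tagging described above for AddTagsToResource addresses the cluster by ARN. In the sketch below the account ID, region, and cluster name inside the ARN are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
	client := elasticache.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// The resource is addressed by ARN; the account ID and cluster name are
	// placeholders. Tags later surface in cost-allocation reports.
	out, err := client.AddTagsToResource(&elasticache.AddTagsToResourceInput{
		ResourceName: aws.String("arn:aws:elasticache:us-west-2:123456789012:cluster:my-cluster"),
		Tags: []*elasticache.Tag{
			{Key: aws.String("CostCenter"), Value: aws.String("web")},
		},
	})
	if err != nil {
		log.Fatalf("AddTagsToResource failed: %v", err)
	}
	for _, t := range out.TagList {
		fmt.Printf("%s=%s\n", aws.StringValue(t.Key), aws.StringValue(t.Value))
	}
}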
+// +// // Example sending a request using the CopySnapshotRequest method. +// req, resp := client.CopySnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Request, output *CopySnapshotOutput) { op := &request.Operation{ Name: opCopySnapshot, @@ -102,6 +166,18 @@ func (c *ElastiCache) CopySnapshotRequest(input *CopySnapshotInput) (req *reques } // The CopySnapshot action makes a copy of an existing snapshot. +// +// Users or groups that have permissions to use the CopySnapshot API can create +// their own Amazon S3 buckets and copy snapshots to them. To control access to +// your snapshots, use an IAM policy to control who has the ability to use the +// CopySnapshot API. For more information about using IAM to control the use +// of ElastiCache APIs, see Exporting Snapshots (http://docs.aws.amazon.com/ElastiCache/latest/Snapshots.Exporting.html) +// and Authentication & Access Control (http://docs.aws.amazon.com/ElastiCache/latest/IAM.html). +// +// Error Message: The authenticated user does not have +// sufficient permissions to perform the desired activity. +// +// Solution: Contact your system administrator to get the needed permissions. func (c *ElastiCache) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { req, out := c.CopySnapshotRequest(input) err := req.Send() @@ -110,7 +186,28 @@ func (c *ElastiCache) CopySnapshot(input *CopySnapshotInpu const opCreateCacheCluster = "CreateCacheCluster" -// CreateCacheClusterRequest generates a request for the CreateCacheCluster operation. +// CreateCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCacheClusterRequest method. +// req, resp := client.CreateCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateCacheClusterRequest(input *CreateCacheClusterInput) (req *request.Request, output *CreateCacheClusterOutput) { op := &request.Operation{ Name: opCreateCacheCluster, @@ -139,7 +236,28 @@ func (c *ElastiCache) CreateCacheCluster(input *CreateCacheClusterInput) (*Creat const opCreateCacheParameterGroup = "CreateCacheParameterGroup" -// CreateCacheParameterGroupRequest generates a request for the CreateCacheParameterGroup operation. +// CreateCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called.
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCacheParameterGroupRequest method. +// req, resp := client.CreateCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateCacheParameterGroupRequest(input *CreateCacheParameterGroupInput) (req *request.Request, output *CreateCacheParameterGroupOutput) { op := &request.Operation{ Name: opCreateCacheParameterGroup, @@ -168,7 +286,28 @@ func (c *ElastiCache) CreateCacheParameterGroup(input *CreateCacheParameterGroup const opCreateCacheSecurityGroup = "CreateCacheSecurityGroup" -// CreateCacheSecurityGroupRequest generates a request for the CreateCacheSecurityGroup operation. +// CreateCacheSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCacheSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCacheSecurityGroupRequest method. +// req, resp := client.CreateCacheSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateCacheSecurityGroupRequest(input *CreateCacheSecurityGroupInput) (req *request.Request, output *CreateCacheSecurityGroupOutput) { op := &request.Operation{ Name: opCreateCacheSecurityGroup, @@ -201,7 +340,28 @@ func (c *ElastiCache) CreateCacheSecurityGroup(input *CreateCacheSecurityGroupIn const opCreateCacheSubnetGroup = "CreateCacheSubnetGroup" -// CreateCacheSubnetGroupRequest generates a request for the CreateCacheSubnetGroup operation. +// CreateCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateCacheSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCacheSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateCacheSubnetGroupRequest method. 
+// req, resp := client.CreateCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateCacheSubnetGroupRequest(input *CreateCacheSubnetGroupInput) (req *request.Request, output *CreateCacheSubnetGroupOutput) { op := &request.Operation{ Name: opCreateCacheSubnetGroup, @@ -231,7 +391,28 @@ func (c *ElastiCache) CreateCacheSubnetGroup(input *CreateCacheSubnetGroupInput) const opCreateReplicationGroup = "CreateReplicationGroup" -// CreateReplicationGroupRequest generates a request for the CreateReplicationGroup operation. +// CreateReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateReplicationGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReplicationGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReplicationGroupRequest method. +// req, resp := client.CreateReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGroupInput) (req *request.Request, output *CreateReplicationGroupOutput) { op := &request.Operation{ Name: opCreateReplicationGroup, @@ -259,7 +440,7 @@ func (c *ElastiCache) CreateReplicationGroupRequest(input *CreateReplicationGrou // successfully created, you can add one or more read replicas to it, // up to a total of five read replicas. // -// Note: This action is valid only for Redis. +// This action is valid only for Redis. func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) (*CreateReplicationGroupOutput, error) { req, out := c.CreateReplicationGroupRequest(input) err := req.Send() @@ -268,7 +449,28 @@ func (c *ElastiCache) CreateReplicationGroup(input *CreateReplicationGroupInput) const opCreateSnapshot = "CreateSnapshot" -// CreateSnapshotRequest generates a request for the CreateSnapshot operation. +// CreateSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotRequest method.
+// req, resp := client.CreateSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) CreateSnapshotRequest(input *CreateSnapshotInput) (req *request.Request, output *CreateSnapshotOutput) { op := &request.Operation{ Name: opCreateSnapshot, @@ -296,7 +498,28 @@ func (c *ElastiCache) CreateSnapshot(input *CreateSnapshotInput) (*CreateSnapsho const opDeleteCacheCluster = "DeleteCacheCluster" -// DeleteCacheClusterRequest generates a request for the DeleteCacheCluster operation. +// DeleteCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheClusterRequest method. +// req, resp := client.DeleteCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteCacheClusterRequest(input *DeleteCacheClusterInput) (req *request.Request, output *DeleteCacheClusterOutput) { op := &request.Operation{ Name: opDeleteCacheCluster, @@ -330,7 +553,28 @@ func (c *ElastiCache) DeleteCacheCluster(input *DeleteCacheClusterInput) (*Delet const opDeleteCacheParameterGroup = "DeleteCacheParameterGroup" -// DeleteCacheParameterGroupRequest generates a request for the DeleteCacheParameterGroup operation. +// DeleteCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheParameterGroupRequest method. +// req, resp := client.DeleteCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteCacheParameterGroupRequest(input *DeleteCacheParameterGroupInput) (req *request.Request, output *DeleteCacheParameterGroupOutput) { op := &request.Operation{ Name: opDeleteCacheParameterGroup, @@ -361,7 +605,28 @@ func (c *ElastiCache) DeleteCacheParameterGroup(input *DeleteCacheParameterGroup const opDeleteCacheSecurityGroup = "DeleteCacheSecurityGroup" -// DeleteCacheSecurityGroupRequest generates a request for the DeleteCacheSecurityGroup operation. 
+// DeleteCacheSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheSecurityGroupRequest method. +// req, resp := client.DeleteCacheSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurityGroupInput) (req *request.Request, output *DeleteCacheSecurityGroupOutput) { op := &request.Operation{ Name: opDeleteCacheSecurityGroup, @@ -383,7 +648,7 @@ func (c *ElastiCache) DeleteCacheSecurityGroupRequest(input *DeleteCacheSecurity // The DeleteCacheSecurityGroup action deletes a cache security group. // -// You cannot delete a cache security group if it is associated with any cache +// You cannot delete a cache security group if it is associated with any cache // clusters. func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupInput) (*DeleteCacheSecurityGroupOutput, error) { req, out := c.DeleteCacheSecurityGroupRequest(input) @@ -393,7 +658,28 @@ func (c *ElastiCache) DeleteCacheSecurityGroup(input *DeleteCacheSecurityGroupIn const opDeleteCacheSubnetGroup = "DeleteCacheSubnetGroup" -// DeleteCacheSubnetGroupRequest generates a request for the DeleteCacheSubnetGroup operation. +// DeleteCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCacheSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCacheSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteCacheSubnetGroupRequest method. +// req, resp := client.DeleteCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGroupInput) (req *request.Request, output *DeleteCacheSubnetGroupOutput) { op := &request.Operation{ Name: opDeleteCacheSubnetGroup, @@ -415,7 +701,7 @@ func (c *ElastiCache) DeleteCacheSubnetGroupRequest(input *DeleteCacheSubnetGrou // The DeleteCacheSubnetGroup action deletes a cache subnet group. // -// You cannot delete a cache subnet group if it is associated with any cache +// You cannot delete a cache subnet group if it is associated with any cache // clusters. 
func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) (*DeleteCacheSubnetGroupOutput, error) { req, out := c.DeleteCacheSubnetGroupRequest(input) @@ -425,7 +711,28 @@ func (c *ElastiCache) DeleteCacheSubnetGroup(input *DeleteCacheSubnetGroupInput) const opDeleteReplicationGroup = "DeleteReplicationGroup" -// DeleteReplicationGroupRequest generates a request for the DeleteReplicationGroup operation. +// DeleteReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReplicationGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReplicationGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReplicationGroupRequest method. +// req, resp := client.DeleteReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteReplicationGroupRequest(input *DeleteReplicationGroupInput) (req *request.Request, output *DeleteReplicationGroupOutput) { op := &request.Operation{ Name: opDeleteReplicationGroup, @@ -459,7 +766,28 @@ func (c *ElastiCache) DeleteReplicationGroup(input *DeleteReplicationGroupInput) const opDeleteSnapshot = "DeleteSnapshot" -// DeleteSnapshotRequest generates a request for the DeleteSnapshot operation. +// DeleteSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotRequest method. +// req, resp := client.DeleteSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DeleteSnapshotRequest(input *DeleteSnapshotInput) (req *request.Request, output *DeleteSnapshotOutput) { op := &request.Operation{ Name: opDeleteSnapshot, @@ -488,7 +816,28 @@ func (c *ElastiCache) DeleteSnapshot(input *DeleteSnapshotInput) (*DeleteSnapsho const opDescribeCacheClusters = "DescribeCacheClusters" -// DescribeCacheClustersRequest generates a request for the DescribeCacheClusters operation. +// DescribeCacheClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheClustersRequest method. +// req, resp := client.DescribeCacheClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheClustersRequest(input *DescribeCacheClustersInput) (req *request.Request, output *DescribeCacheClustersOutput) { op := &request.Operation{ Name: opDescribeCacheClusters, @@ -540,6 +889,23 @@ func (c *ElastiCache) DescribeCacheClusters(input *DescribeCacheClustersInput) ( return out, err } +// DescribeCacheClustersPages iterates over the pages of a DescribeCacheClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheClusters operation. +// pageNum := 0 +// err := client.DescribeCacheClustersPages(params, +// func(page *DescribeCacheClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInput, fn func(p *DescribeCacheClustersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheClustersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -550,7 +916,28 @@ func (c *ElastiCache) DescribeCacheClustersPages(input *DescribeCacheClustersInp const opDescribeCacheEngineVersions = "DescribeCacheEngineVersions" -// DescribeCacheEngineVersionsRequest generates a request for the DescribeCacheEngineVersions operation. +// DescribeCacheEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheEngineVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheEngineVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheEngineVersionsRequest method. 
+// req, resp := client.DescribeCacheEngineVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheEngineVersionsRequest(input *DescribeCacheEngineVersionsInput) (req *request.Request, output *DescribeCacheEngineVersionsOutput) { op := &request.Operation{ Name: opDescribeCacheEngineVersions, @@ -582,6 +969,23 @@ func (c *ElastiCache) DescribeCacheEngineVersions(input *DescribeCacheEngineVers return out, err } +// DescribeCacheEngineVersionsPages iterates over the pages of a DescribeCacheEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheEngineVersions operation. +// pageNum := 0 +// err := client.DescribeCacheEngineVersionsPages(params, +// func(page *DescribeCacheEngineVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngineVersionsInput, fn func(p *DescribeCacheEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheEngineVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -592,7 +996,28 @@ func (c *ElastiCache) DescribeCacheEngineVersionsPages(input *DescribeCacheEngin const opDescribeCacheParameterGroups = "DescribeCacheParameterGroups" -// DescribeCacheParameterGroupsRequest generates a request for the DescribeCacheParameterGroups operation. +// DescribeCacheParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheParameterGroupsRequest method. +// req, resp := client.DescribeCacheParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheParameterGroupsRequest(input *DescribeCacheParameterGroupsInput) (req *request.Request, output *DescribeCacheParameterGroupsOutput) { op := &request.Operation{ Name: opDescribeCacheParameterGroups, @@ -625,6 +1050,23 @@ func (c *ElastiCache) DescribeCacheParameterGroups(input *DescribeCacheParameter return out, err } +// DescribeCacheParameterGroupsPages iterates over the pages of a DescribeCacheParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheParameterGroups method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheParameterGroups operation. +// pageNum := 0 +// err := client.DescribeCacheParameterGroupsPages(params, +// func(page *DescribeCacheParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCacheParameterGroupsInput, fn func(p *DescribeCacheParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheParameterGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -635,7 +1077,28 @@ func (c *ElastiCache) DescribeCacheParameterGroupsPages(input *DescribeCachePara const opDescribeCacheParameters = "DescribeCacheParameters" -// DescribeCacheParametersRequest generates a request for the DescribeCacheParameters operation. +// DescribeCacheParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheParametersRequest method. +// req, resp := client.DescribeCacheParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheParametersRequest(input *DescribeCacheParametersInput) (req *request.Request, output *DescribeCacheParametersOutput) { op := &request.Operation{ Name: opDescribeCacheParameters, @@ -667,6 +1130,23 @@ func (c *ElastiCache) DescribeCacheParameters(input *DescribeCacheParametersInpu return out, err } +// DescribeCacheParametersPages iterates over the pages of a DescribeCacheParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheParameters operation. 
+// pageNum := 0 +// err := client.DescribeCacheParametersPages(params, +// func(page *DescribeCacheParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParametersInput, fn func(p *DescribeCacheParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -677,7 +1157,28 @@ func (c *ElastiCache) DescribeCacheParametersPages(input *DescribeCacheParameter const opDescribeCacheSecurityGroups = "DescribeCacheSecurityGroups" -// DescribeCacheSecurityGroupsRequest generates a request for the DescribeCacheSecurityGroups operation. +// DescribeCacheSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheSecurityGroupsRequest method. +// req, resp := client.DescribeCacheSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheSecurityGroupsRequest(input *DescribeCacheSecurityGroupsInput) (req *request.Request, output *DescribeCacheSecurityGroupsOutput) { op := &request.Operation{ Name: opDescribeCacheSecurityGroups, @@ -710,6 +1211,23 @@ func (c *ElastiCache) DescribeCacheSecurityGroups(input *DescribeCacheSecurityGr return out, err } +// DescribeCacheSecurityGroupsPages iterates over the pages of a DescribeCacheSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheSecurityGroups operation. +// pageNum := 0 +// err := client.DescribeCacheSecurityGroupsPages(params, +// func(page *DescribeCacheSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecurityGroupsInput, fn func(p *DescribeCacheSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheSecurityGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -720,7 +1238,28 @@ func (c *ElastiCache) DescribeCacheSecurityGroupsPages(input *DescribeCacheSecur const opDescribeCacheSubnetGroups = "DescribeCacheSubnetGroups" -// DescribeCacheSubnetGroupsRequest generates a request for the DescribeCacheSubnetGroups operation. 
+// DescribeCacheSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCacheSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCacheSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCacheSubnetGroupsRequest method. +// req, resp := client.DescribeCacheSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeCacheSubnetGroupsRequest(input *DescribeCacheSubnetGroupsInput) (req *request.Request, output *DescribeCacheSubnetGroupsOutput) { op := &request.Operation{ Name: opDescribeCacheSubnetGroups, @@ -753,6 +1292,23 @@ func (c *ElastiCache) DescribeCacheSubnetGroups(input *DescribeCacheSubnetGroups return out, err } +// DescribeCacheSubnetGroupsPages iterates over the pages of a DescribeCacheSubnetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeCacheSubnetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeCacheSubnetGroups operation. +// pageNum := 0 +// err := client.DescribeCacheSubnetGroupsPages(params, +// func(page *DescribeCacheSubnetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetGroupsInput, fn func(p *DescribeCacheSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeCacheSubnetGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -763,7 +1319,28 @@ func (c *ElastiCache) DescribeCacheSubnetGroupsPages(input *DescribeCacheSubnetG const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" -// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. +// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEngineDefaultParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEngineDefaultParametersRequest method. 
+// req, resp := client.DescribeEngineDefaultParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { op := &request.Operation{ Name: opDescribeEngineDefaultParameters, @@ -795,6 +1372,23 @@ func (c *ElastiCache) DescribeEngineDefaultParameters(input *DescribeEngineDefau return out, err } +// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEngineDefaultParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. +// pageNum := 0 +// err := client.DescribeEngineDefaultParametersPages(params, +// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEngineDefaultParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -805,7 +1399,28 @@ func (c *ElastiCache) DescribeEngineDefaultParametersPages(input *DescribeEngine const opDescribeEvents = "DescribeEvents" -// DescribeEventsRequest generates a request for the DescribeEvents operation. +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { op := &request.Operation{ Name: opDescribeEvents, @@ -842,6 +1457,23 @@ func (c *ElastiCache) DescribeEvents(input *DescribeEventsInput) (*DescribeEvent return out, err } +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. 
+// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -852,7 +1484,28 @@ func (c *ElastiCache) DescribeEventsPages(input *DescribeEventsInput, fn func(p const opDescribeReplicationGroups = "DescribeReplicationGroups" -// DescribeReplicationGroupsRequest generates a request for the DescribeReplicationGroups operation. +// DescribeReplicationGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReplicationGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReplicationGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReplicationGroupsRequest method. +// req, resp := client.DescribeReplicationGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeReplicationGroupsRequest(input *DescribeReplicationGroupsInput) (req *request.Request, output *DescribeReplicationGroupsOutput) { op := &request.Operation{ Name: opDescribeReplicationGroups, @@ -885,6 +1538,23 @@ func (c *ElastiCache) DescribeReplicationGroups(input *DescribeReplicationGroups return out, err } +// DescribeReplicationGroupsPages iterates over the pages of a DescribeReplicationGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReplicationGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReplicationGroups operation. +// pageNum := 0 +// err := client.DescribeReplicationGroupsPages(params, +// func(page *DescribeReplicationGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationGroupsInput, fn func(p *DescribeReplicationGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReplicationGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -895,7 +1565,28 @@ func (c *ElastiCache) DescribeReplicationGroupsPages(input *DescribeReplicationG const opDescribeReservedCacheNodes = "DescribeReservedCacheNodes" -// DescribeReservedCacheNodesRequest generates a request for the DescribeReservedCacheNodes operation. +// DescribeReservedCacheNodesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodes operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedCacheNodes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedCacheNodesRequest method. +// req, resp := client.DescribeReservedCacheNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeReservedCacheNodesRequest(input *DescribeReservedCacheNodesInput) (req *request.Request, output *DescribeReservedCacheNodesOutput) { op := &request.Operation{ Name: opDescribeReservedCacheNodes, @@ -927,6 +1618,23 @@ func (c *ElastiCache) DescribeReservedCacheNodes(input *DescribeReservedCacheNod return out, err } +// DescribeReservedCacheNodesPages iterates over the pages of a DescribeReservedCacheNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedCacheNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodes operation. +// pageNum := 0 +// err := client.DescribeReservedCacheNodesPages(params, +// func(page *DescribeReservedCacheNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCacheNodesInput, fn func(p *DescribeReservedCacheNodesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedCacheNodesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -937,7 +1645,28 @@ func (c *ElastiCache) DescribeReservedCacheNodesPages(input *DescribeReservedCac const opDescribeReservedCacheNodesOfferings = "DescribeReservedCacheNodesOfferings" -// DescribeReservedCacheNodesOfferingsRequest generates a request for the DescribeReservedCacheNodesOfferings operation. +// DescribeReservedCacheNodesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedCacheNodesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedCacheNodesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedCacheNodesOfferingsRequest method. 
+// req, resp := client.DescribeReservedCacheNodesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeReservedCacheNodesOfferingsRequest(input *DescribeReservedCacheNodesOfferingsInput) (req *request.Request, output *DescribeReservedCacheNodesOfferingsOutput) { op := &request.Operation{ Name: opDescribeReservedCacheNodesOfferings, @@ -969,6 +1698,23 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferings(input *DescribeReserve return out, err } +// DescribeReservedCacheNodesOfferingsPages iterates over the pages of a DescribeReservedCacheNodesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedCacheNodesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedCacheNodesOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedCacheNodesOfferingsPages(params, +// func(page *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeReservedCacheNodesOfferingsInput, fn func(p *DescribeReservedCacheNodesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedCacheNodesOfferingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -979,7 +1725,28 @@ func (c *ElastiCache) DescribeReservedCacheNodesOfferingsPages(input *DescribeRe const opDescribeSnapshots = "DescribeSnapshots" -// DescribeSnapshotsRequest generates a request for the DescribeSnapshots operation. +// DescribeSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotsRequest method. +// req, resp := client.DescribeSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) DescribeSnapshotsRequest(input *DescribeSnapshotsInput) (req *request.Request, output *DescribeSnapshotsOutput) { op := &request.Operation{ Name: opDescribeSnapshots, @@ -1013,6 +1780,23 @@ func (c *ElastiCache) DescribeSnapshots(input *DescribeSnapshotsInput) (*Describ return out, err } +// DescribeSnapshotsPages iterates over the pages of a DescribeSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeSnapshots method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeSnapshots operation. +// pageNum := 0 +// err := client.DescribeSnapshotsPages(params, +// func(page *DescribeSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn func(p *DescribeSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeSnapshotsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1023,7 +1807,28 @@ func (c *ElastiCache) DescribeSnapshotsPages(input *DescribeSnapshotsInput, fn f const opListAllowedNodeTypeModifications = "ListAllowedNodeTypeModifications" -// ListAllowedNodeTypeModificationsRequest generates a request for the ListAllowedNodeTypeModifications operation. +// ListAllowedNodeTypeModificationsRequest generates a "aws/request.Request" representing the +// client's request for the ListAllowedNodeTypeModifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAllowedNodeTypeModifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAllowedNodeTypeModificationsRequest method. +// req, resp := client.ListAllowedNodeTypeModificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ListAllowedNodeTypeModificationsRequest(input *ListAllowedNodeTypeModificationsInput) (req *request.Request, output *ListAllowedNodeTypeModificationsOutput) { op := &request.Operation{ Name: opListAllowedNodeTypeModifications, @@ -1056,7 +1861,28 @@ func (c *ElastiCache) ListAllowedNodeTypeModifications(input *ListAllowedNodeTyp const opListTagsForResource = "ListTagsForResource" -// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. 
+// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *TagListMessage) { op := &request.Operation{ Name: opListTagsForResource, @@ -1090,7 +1916,28 @@ func (c *ElastiCache) ListTagsForResource(input *ListTagsForResourceInput) (*Tag const opModifyCacheCluster = "ModifyCacheCluster" -// ModifyCacheClusterRequest generates a request for the ModifyCacheCluster operation. +// ModifyCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyCacheClusterRequest method. +// req, resp := client.ModifyCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ModifyCacheClusterRequest(input *ModifyCacheClusterInput) (req *request.Request, output *ModifyCacheClusterOutput) { op := &request.Operation{ Name: opModifyCacheCluster, @@ -1119,7 +1966,28 @@ func (c *ElastiCache) ModifyCacheCluster(input *ModifyCacheClusterInput) (*Modif const opModifyCacheParameterGroup = "ModifyCacheParameterGroup" -// ModifyCacheParameterGroupRequest generates a request for the ModifyCacheParameterGroup operation. +// ModifyCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyCacheParameterGroupRequest method. +// req, resp := client.ModifyCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ModifyCacheParameterGroupRequest(input *ModifyCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { op := &request.Operation{ Name: opModifyCacheParameterGroup, @@ -1148,7 +2016,28 @@ func (c *ElastiCache) ModifyCacheParameterGroup(input *ModifyCacheParameterGroup const opModifyCacheSubnetGroup = "ModifyCacheSubnetGroup" -// ModifyCacheSubnetGroupRequest generates a request for the ModifyCacheSubnetGroup operation. 
+// ModifyCacheSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCacheSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyCacheSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyCacheSubnetGroupRequest method. +// req, resp := client.ModifyCacheSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ModifyCacheSubnetGroupRequest(input *ModifyCacheSubnetGroupInput) (req *request.Request, output *ModifyCacheSubnetGroupOutput) { op := &request.Operation{ Name: opModifyCacheSubnetGroup, @@ -1175,7 +2064,28 @@ func (c *ElastiCache) ModifyCacheSubnetGroup(input *ModifyCacheSubnetGroupInput) const opModifyReplicationGroup = "ModifyReplicationGroup" -// ModifyReplicationGroupRequest generates a request for the ModifyReplicationGroup operation. +// ModifyReplicationGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyReplicationGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyReplicationGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyReplicationGroupRequest method. +// req, resp := client.ModifyReplicationGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ModifyReplicationGroupRequest(input *ModifyReplicationGroupInput) (req *request.Request, output *ModifyReplicationGroupOutput) { op := &request.Operation{ Name: opModifyReplicationGroup, @@ -1203,7 +2113,28 @@ func (c *ElastiCache) ModifyReplicationGroup(input *ModifyReplicationGroupInput) const opPurchaseReservedCacheNodesOffering = "PurchaseReservedCacheNodesOffering" -// PurchaseReservedCacheNodesOfferingRequest generates a request for the PurchaseReservedCacheNodesOffering operation. +// PurchaseReservedCacheNodesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedCacheNodesOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the PurchaseReservedCacheNodesOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedCacheNodesOfferingRequest method. +// req, resp := client.PurchaseReservedCacheNodesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) PurchaseReservedCacheNodesOfferingRequest(input *PurchaseReservedCacheNodesOfferingInput) (req *request.Request, output *PurchaseReservedCacheNodesOfferingOutput) { op := &request.Operation{ Name: opPurchaseReservedCacheNodesOffering, @@ -1231,7 +2162,28 @@ func (c *ElastiCache) PurchaseReservedCacheNodesOffering(input *PurchaseReserved const opRebootCacheCluster = "RebootCacheCluster" -// RebootCacheClusterRequest generates a request for the RebootCacheCluster operation. +// RebootCacheClusterRequest generates a "aws/request.Request" representing the +// client's request for the RebootCacheCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootCacheCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootCacheClusterRequest method. +// req, resp := client.RebootCacheClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) RebootCacheClusterRequest(input *RebootCacheClusterInput) (req *request.Request, output *RebootCacheClusterOutput) { op := &request.Operation{ Name: opRebootCacheCluster, @@ -1267,7 +2219,28 @@ func (c *ElastiCache) RebootCacheCluster(input *RebootCacheClusterInput) (*Reboo const opRemoveTagsFromResource = "RemoveTagsFromResource" -// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. 
+// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *TagListMessage) { op := &request.Operation{ Name: opRemoveTagsFromResource, @@ -1295,7 +2268,28 @@ func (c *ElastiCache) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) const opResetCacheParameterGroup = "ResetCacheParameterGroup" -// ResetCacheParameterGroupRequest generates a request for the ResetCacheParameterGroup operation. +// ResetCacheParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetCacheParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetCacheParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetCacheParameterGroupRequest method. +// req, resp := client.ResetCacheParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) ResetCacheParameterGroupRequest(input *ResetCacheParameterGroupInput) (req *request.Request, output *CacheParameterGroupNameMessage) { op := &request.Operation{ Name: opResetCacheParameterGroup, @@ -1325,7 +2319,28 @@ func (c *ElastiCache) ResetCacheParameterGroup(input *ResetCacheParameterGroupIn const opRevokeCacheSecurityGroupIngress = "RevokeCacheSecurityGroupIngress" -// RevokeCacheSecurityGroupIngressRequest generates a request for the RevokeCacheSecurityGroupIngress operation. +// RevokeCacheSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeCacheSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeCacheSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeCacheSecurityGroupIngressRequest method. 
+// req, resp := client.RevokeCacheSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElastiCache) RevokeCacheSecurityGroupIngressRequest(input *RevokeCacheSecurityGroupIngressInput) (req *request.Request, output *RevokeCacheSecurityGroupIngressOutput) { op := &request.Operation{ Name: opRevokeCacheSecurityGroupIngress, @@ -1446,7 +2461,11 @@ type AuthorizeCacheSecurityGroupIngressOutput struct { // Represents the output of one of the following actions: // - // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress CacheSecurityGroup *CacheSecurityGroup `type:"structure"` } @@ -1501,19 +2520,34 @@ type CacheCluster struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -1569,7 +2603,21 @@ type CacheCluster struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // The replication group to which this cache cluster belongs. If this field @@ -1584,8 +2632,8 @@ type CacheCluster struct { // to 5, then a snapshot that was taken today will be retained for 5 days before // being deleted. // - // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups - // are turned off. + // If the value of SnapshotRetentionLimit is set to zero (0), backups are + // turned off. SnapshotRetentionLimit *int64 `type:"integer"` // The daily time range (in UTC) during which ElastiCache will begin taking @@ -1641,20 +2689,35 @@ func (s CacheEngineVersion) GoString() string { // // Valid node types are as follows: // -// General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, -// cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous -// generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, -// cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current -// generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, -// cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, -// cache.m2.4xlarge Notes: +// General purpose: // -// All t2 instances are created in an Amazon Virtual Private Cloud (VPC). -// Redis backup/restore is not supported for t2 instances. Redis Append-only -// files (AOF) functionality is not supported for t1 or t2 instances. For a -// complete listing of cache node types and specifications, see Amazon ElastiCache -// Product Features and Details (http://aws.amazon.com/elasticache/details) -// and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) +// Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, +// cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge +// +// Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, +// cache.m1.large, cache.m1.xlarge +// +// Compute optimized: cache.c1.xlarge +// +// Memory optimized: +// +// Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, +// cache.r3.4xlarge, cache.r3.8xlarge +// +// Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge +// +// Notes: +// +// All t2 instances are created in an Amazon Virtual Private Cloud (VPC). +// +// Redis backup/restore is not supported for t2 instances. +// +// Redis Append-only files (AOF) functionality is not supported for t1 or +// t2 instances. +// +// For a complete listing of cache node types and specifications, see Amazon +// ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) +// and either Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). 
type CacheNode struct { _ struct{} `type:"structure"` @@ -1707,6 +2770,12 @@ type CacheNodeTypeSpecificParameter struct { // A list of cache node types and their corresponding values for this parameter. CacheNodeTypeSpecificValues []*CacheNodeTypeSpecificValue `locationNameList:"CacheNodeTypeSpecificValue" type:"list"` + // ChangeType indicates whether a change to the parameter will be applied immediately + // or requires a reboot for the change to be applied. You can force a reboot + // or wait until the next maintenance window's reboot. For more information, + // see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html). + ChangeType *string `type:"string" enum:"ChangeType"` + // The valid data type for the parameter. DataType *string `type:"string"` @@ -1786,7 +2855,9 @@ func (s CacheParameterGroup) GoString() string { // Represents the output of one of the following actions: // -// ModifyCacheParameterGroup ResetCacheParameterGroup +// ModifyCacheParameterGroup +// +// ResetCacheParameterGroup type CacheParameterGroupNameMessage struct { _ struct{} `type:"structure"` @@ -1831,7 +2902,11 @@ func (s CacheParameterGroupStatus) GoString() string { // Represents the output of one of the following actions: // -// AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress +// AuthorizeCacheSecurityGroupIngress +// +// CreateCacheSecurityGroup +// +// RevokeCacheSecurityGroupIngress type CacheSecurityGroup struct { _ struct{} `type:"structure"` @@ -1884,7 +2959,9 @@ func (s CacheSecurityGroupMembership) GoString() string { // Represents the output of one of the following actions: // -// CreateCacheSubnetGroup ModifyCacheSubnetGroup +// CreateCacheSubnetGroup +// +// ModifyCacheSubnetGroup type CacheSubnetGroup struct { _ struct{} `type:"structure"` @@ -1916,10 +2993,71 @@ func (s CacheSubnetGroup) GoString() string { type CopySnapshotInput struct { _ struct{} `type:"structure"` - // The name of an existing snapshot from which to copy. + // The name of an existing snapshot from which to make a copy. SourceSnapshotName *string `type:"string" required:"true"` - // A name for the copied snapshot. + // The Amazon S3 bucket to which the snapshot will be exported. This parameter + // is used only when exporting a snapshot for external access. + // + // When using this parameter to export a snapshot, be sure Amazon ElastiCache + // has the needed permissions to this S3 bucket. For more information, see Step + // 2: Grant ElastiCache Access to Your Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.GrantAccess) + // in the Amazon ElastiCache User Guide. + // + // Error Messages: + // + // You could receive one of the following error messages. + // + // Erorr Messages Error Message: ElastiCache has not been granted READ + // permissions %s on the S3 Bucket. + // + // Solution: Add List and Read permissions on the bucket. + // + // Error Message: ElastiCache has not been granted WRITE permissions %s + // on the S3 Bucket. + // + // Solution: Add Upload/Delete permissions on the bucket. + // + // Error Message: ElastiCache has not been granted READ_ACP permissions + // %s on the S3 Bucket. + // + // Solution: Add View Permissions permissions on the bucket. + // + // Error Message: The S3 bucket %s is outside of the region. 
+ // + // Solution: Before exporting your snapshot, create a new Amazon S3 bucket + // in the same region as your snapshot. For more information, see Step 1: Create + // an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket). + // + // Error Message: The S3 bucket %s does not exist. + // + // Solution: Create an Amazon S3 bucket in the same region as your snapshot. + // For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket). + // + // Error Message: The S3 bucket %s is not owned by the authenticated user. + // + // Solution: Create an Amazon S3 bucket in the same region as your snapshot. + // For more information, see Step 1: Create an Amazon S3 Bucket (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html#Snapshots.Exporting.CreateBucket). + // + // Error Message: The authenticated user does not have sufficient permissions + // to perform the desired activity. + // + // Solution: Contact your system administrator to get the needed permissions. + // + // For more information, see Exporting a Snapshot (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Snapshots.Exporting.html) + // in the Amazon ElastiCache User Guide. + TargetBucket *string `type:"string"` + + // A name for the snapshot copy. ElastiCache does not permit overwriting a snapshot, + // therefore this name must be unique within its context - ElastiCache or an + // Amazon S3 bucket if exporting. + // + // Error Message Error Message: The S3 bucket %s already contains an object + // with key %s. + // + // Solution: Give the TargetSnapshotName a new and unique value. If exporting + // a snapshot, you could alternatively create a new Amazon S3 bucket and use + // this same value for TargetSnapshotName. TargetSnapshotName *string `type:"string" required:"true"` } @@ -1986,30 +3124,47 @@ type CreateCacheClusterInput struct { // The node group identifier. This parameter is stored as a lowercase string. // - // Constraints: + // Constraints: // - // A name must contain from 1 to 20 alphanumeric characters or hyphens. The - // first character must be a letter. A name cannot end with a hyphen or contain - // two consecutive hyphens. + // A name must contain from 1 to 20 alphanumeric characters or hyphens. + // + // The first character must be a letter. + // + // A name cannot end with a hyphen or contain two consecutive hyphens. CacheClusterId *string `type:"string" required:"true"` // The compute and memory capacity of the nodes in the node group. // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. 
For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -2035,14 +3190,14 @@ type CreateCacheClusterInput struct { // // Valid values for this parameter are: // - // memcached | redis + // memcached | redis Engine *string `type:"string"` // The version number of the cache engine to be used for this cache cluster. // To view the supported cache engine versions, use the DescribeCacheEngineVersions // action. // - // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Important: You can upgrade to a newer engine version (see Selecting a Cache // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing cache cluster or @@ -2052,7 +3207,7 @@ type CreateCacheClusterInput struct { // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) topic to which notifications will be sent. // - // The Amazon SNS topic owner must be the same as the cache cluster owner. + // The Amazon SNS topic owner must be the same as the cache cluster owner. NotificationTopicArn *string `type:"string"` // The initial number of cache nodes that the cache cluster will have. @@ -2094,8 +3249,9 @@ type CreateCacheClusterInput struct { // Default: System chosen Availability Zones. 
// // Example: One Memcached node in each of three different Availability Zones: + // PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2b&PreferredAvailabilityZones.member.3=us-west-2c // - // Example: All three Memcached nodes in one Availability Zone: + // Example: All three Memcached nodes in one Availability Zone: PreferredAvailabilityZones.member.1=us-west-2a&PreferredAvailabilityZones.member.2=us-west-2a&PreferredAvailabilityZones.member.3=us-west-2a PreferredAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` // Specifies the weekly time range during which maintenance on the cache cluster @@ -2103,7 +3259,21 @@ type CreateCacheClusterInput struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // The ID of the replication group to which this cache cluster should belong. @@ -2115,7 +3285,7 @@ type CreateCacheClusterInput struct { // zone is not specified, the cache cluster will be created in availability // zones that provide the best spread of read replicas across availability zones. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. ReplicationGroupId *string `type:"string"` // One or more VPC security groups associated with the cache cluster. @@ -2129,16 +3299,16 @@ type CreateCacheClusterInput struct { // file will be used to populate the node group. The Amazon S3 object name in // the ARN cannot contain any commas. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. // - // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` // The name of a snapshot from which to restore data into the new node group. // The snapshot status changes to restoring while the new node group is being // created. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. SnapshotName *string `type:"string"` // The number of days for which ElastiCache will retain automatic snapshots @@ -2146,9 +3316,9 @@ type CreateCacheClusterInput struct { // then a snapshot that was taken today will be retained for 5 days before being // deleted. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. // - // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). SnapshotRetentionLimit *int64 `type:"integer"` // The daily time range (in UTC) during which ElastiCache will begin taking @@ -2159,7 +3329,7 @@ type CreateCacheClusterInput struct { // If you do not specify this parameter, then ElastiCache will automatically // choose an appropriate time range. // - // Note: This parameter is only valid if the Engine parameter is redis. + // Note: This parameter is only valid if the Engine parameter is redis. 
SnapshotWindow *string `type:"string"` // A list of cost allocation tags to be added to this resource. A tag is a key-value @@ -2318,7 +3488,11 @@ type CreateCacheSecurityGroupOutput struct { // Represents the output of one of the following actions: // - // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress CacheSecurityGroup *CacheSecurityGroup `type:"structure"` } @@ -2384,7 +3558,9 @@ type CreateCacheSubnetGroupOutput struct { // Represents the output of one of the following actions: // - // CreateCacheSubnetGroup ModifyCacheSubnetGroup + // CreateCacheSubnetGroup + // + // ModifyCacheSubnetGroup CacheSubnetGroup *CacheSubnetGroup `type:"structure"` } @@ -2413,28 +3589,45 @@ type CreateReplicationGroupInput struct { // // Default: false // - // ElastiCache Multi-AZ replication groups is not supported on: + // ElastiCache Multi-AZ replication groups is not supported on: // - // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. AutomaticFailoverEnabled *bool `type:"boolean"` // The compute and memory capacity of the nodes in the node group. // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. 
+ // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -2460,17 +3653,18 @@ type CreateReplicationGroupInput struct { // in this replication group. To view the supported cache engine versions, use // the DescribeCacheEngineVersions action. // - // Important: You can upgrade to a newer engine version (see Selecting a Cache - // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), - // but you cannot downgrade to an earlier engine version. If you want to use - // an earlier engine version, you must delete the existing cache cluster or - // replication group and create it anew with the earlier engine version. + // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)) + // in the ElastiCache User Guide, but you cannot downgrade to an earlier engine + // version. If you want to use an earlier engine version, you must delete the + // existing cache cluster or replication group and create it anew with the earlier + // engine version. EngineVersion *string `type:"string"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service // (SNS) topic to which notifications will be sent. // - // The Amazon SNS topic owner must be the same as the cache cluster owner. + // The Amazon SNS topic owner must be the same as the cache cluster owner. NotificationTopicArn *string `type:"string"` // The number of cache clusters this replication group will initially have. @@ -2490,14 +3684,18 @@ type CreateReplicationGroupInput struct { // A list of EC2 availability zones in which the replication group's cache clusters // will be created. The order of the availability zones in the list is not important. // - // If you are creating your replication group in an Amazon VPC (recommended), + // If you are creating your replication group in an Amazon VPC (recommended), // you can only locate cache clusters in availability zones associated with - // the subnets in the selected subnet group. The number of availability zones - // listed must equal the value of NumCacheClusters. + // the subnets in the selected subnet group. // - // Default: system chosen availability zones. + // The number of availability zones listed must equal the value of NumCacheClusters. + // + // Default: system chosen availability zones. // // Example: One Redis cache cluster in each of three availability zones. + // + // PreferredAvailabilityZones.member.1=us-west-2a PreferredAvailabilityZones.member.2=us-west-2c + // PreferredAvailabilityZones.member.3=us-west-2c PreferredCacheClusterAZs []*string `locationNameList:"AvailabilityZone" type:"list"` // Specifies the weekly time range during which maintenance on the cache cluster @@ -2505,7 +3703,21 @@ type CreateReplicationGroupInput struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // The identifier of the cache cluster that will serve as the primary for this @@ -2523,9 +3735,11 @@ type CreateReplicationGroupInput struct { // // Constraints: // - // A name must contain from 1 to 20 alphanumeric characters or hyphens. The - // first character must be a letter. A name cannot end with a hyphen or contain - // two consecutive hyphens. + // A name must contain from 1 to 20 alphanumeric characters or hyphens. + // + // The first character must be a letter. + // + // A name cannot end with a hyphen or contain two consecutive hyphens. ReplicationGroupId *string `type:"string" required:"true"` // One or more Amazon VPC security groups associated with this replication group. @@ -2539,16 +3753,16 @@ type CreateReplicationGroupInput struct { // file will be used to populate the node group. The Amazon S3 object name in // the ARN cannot contain any commas. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. // - // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb + // Example of an Amazon S3 ARN: arn:aws:s3:::my_bucket/snapshot1.rdb SnapshotArns []*string `locationNameList:"SnapshotArn" type:"list"` // The name of a snapshot from which to restore data into the new node group. // The snapshot status changes to restoring while the new node group is being // created. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. SnapshotName *string `type:"string"` // The number of days for which ElastiCache will retain automatic snapshots @@ -2556,9 +3770,9 @@ type CreateReplicationGroupInput struct { // then a snapshot that was taken today will be retained for 5 days before being // deleted. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. // - // Default: 0 (i.e., automatic backups are disabled for this cache cluster). + // Default: 0 (i.e., automatic backups are disabled for this cache cluster). SnapshotRetentionLimit *int64 `type:"integer"` // The daily time range (in UTC) during which ElastiCache will begin taking @@ -2569,7 +3783,7 @@ type CreateReplicationGroupInput struct { // If you do not specify this parameter, then ElastiCache will automatically // choose an appropriate time range. // - // Note: This parameter is only valid if the Engine parameter is redis. + // This parameter is only valid if the Engine parameter is redis. SnapshotWindow *string `type:"string"` // A list of cost allocation tags to be added to this resource. A tag is a key-value @@ -2736,7 +3950,7 @@ type DeleteCacheParameterGroupInput struct { // The name of the cache parameter group to delete. // - // The specified cache security group must not be associated with any cache + // The specified cache security group must not be associated with any cache // clusters. CacheParameterGroupName *string `type:"string" required:"true"` } @@ -2784,7 +3998,7 @@ type DeleteCacheSecurityGroupInput struct { // The name of the cache security group to delete. // - // You cannot delete the default security group. + // You cannot delete the default security group. 
CacheSecurityGroupName *string `type:"string" required:"true"` } @@ -3048,8 +4262,11 @@ type DescribeCacheEngineVersionsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens CacheParameterGroupFamily *string `type:"string"` // If true, specifies that only the default version of the specified engine @@ -3177,7 +4394,7 @@ type DescribeCacheParametersInput struct { // includes only records beyond the marker, up to the value specified by MaxRecords. Marker *string `type:"string"` - // The maximum number of records to include in the response. If more records + // The maximum number of brecords to include in the response. If more records // exist than the specified MaxRecords value, a marker is included in the response // so that the remaining results can be retrieved. // @@ -3551,19 +4768,34 @@ type DescribeReservedCacheNodesInput struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). 
CacheNodeType *string `type:"string"` @@ -3626,19 +4858,34 @@ type DescribeReservedCacheNodesOfferingsInput struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -3917,8 +5164,7 @@ type ListAllowedNodeTypeModificationsInput struct { // this cluster and from that to to create a list of node types you can scale // up to. // - // Important: You must provide a value for either the CacheClusterId or the - // ReplicationGroupId. + // You must provide a value for either the CacheClusterId or the ReplicationGroupId. CacheClusterId *string `type:"string"` // The name of the replication group want to scale up to a larger node type. @@ -3926,8 +5172,7 @@ type ListAllowedNodeTypeModificationsInput struct { // being used by this replication group, and from that to create a list of node // types you can scale up to. // - // Important: You must provide a value for either the CacheClusterId or the - // ReplicationGroupId. + // You must provide a value for either the CacheClusterId or the ReplicationGroupId. ReplicationGroupId *string `type:"string"` } @@ -4012,7 +5257,7 @@ type ModifyCacheClusterInput struct { // // This option is only supported for Memcached cache clusters. 
// - // You cannot specify single-az if the Memcached cache cluster already has + // You cannot specify single-az if the Memcached cache cluster already has // cache nodes in different Availability Zones. If cross-az is specified, existing // Memcached nodes remain in their current Availability Zone. // @@ -4029,9 +5274,10 @@ type ModifyCacheClusterInput struct { // If false, then changes to the cache cluster are applied on the next maintenance // reboot, or the next failure reboot, whichever occurs first. // - // If you perform a ModifyCacheCluster before a pending modification is applied, - // the pending modification is replaced by the newer modification. Valid values: - // true | false + // If you perform a ModifyCacheCluster before a pending modification is applied, + // the pending modification is replaced by the newer modification. + // + // Valid values: true | false // // Default: false ApplyImmediately *bool `type:"boolean"` @@ -4076,7 +5322,7 @@ type ModifyCacheClusterInput struct { // The upgraded version of the cache engine to be run on the cache nodes. // - // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Important: You can upgrade to a newer engine version (see Selecting a Cache // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing cache cluster and @@ -4093,15 +5339,20 @@ type ModifyCacheClusterInput struct { // // This option is only supported on Memcached clusters. // - // Scenarios: Scenario 1: You have 3 active nodes and wish to add 2 nodes. - // Specify NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones - // for the two new nodes. Scenario 2: You have 3 active nodes and 2 nodes pending - // creation (from the scenario 1 call) and want to add 1 more node. Specify - // NumCacheNodes=6 ((3 + 2) + 1) and optionally specify an Availability Zone - // for the new node. Scenario 3: You want to cancel all pending actions. Specify - // NumCacheNodes=3 to cancel all pending actions. + // Scenarios: // - // The Availability Zone placement of nodes pending creation cannot be modified. + // Scenario 1: You have 3 active nodes and wish to add 2 nodes. Specify + // NumCacheNodes=5 (3 + 2) and optionally specify two Availability Zones for + // the two new nodes. + // + // Scenario 2: You have 3 active nodes and 2 nodes pending creation (from + // the scenario 1 call) and want to add 1 more node. Specify NumCacheNodes=6 + // ((3 + 2) + 1) and optionally specify an Availability Zone for the new node. + // + // Scenario 3: You want to cancel all pending actions. Specify NumCacheNodes=3 + // to cancel all pending actions. + // + // The Availability Zone placement of nodes pending creation cannot be modified. // If you wish to cancel any nodes pending creation, add 0 nodes by setting // NumCacheNodes to the number of current nodes. // @@ -4111,24 +5362,53 @@ type ModifyCacheClusterInput struct { // Availability Zones, see the Availability Zone Considerations section of Cache // Node Considerations for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheNode.Memcached.html). 
// - // Impact of new add/remove requests upon pending requests + // Impact of new add/remove requests upon pending requests // - // Scenario-1 Pending Action: Delete New Request: Delete Result: The new - // delete, pending or immediate, replaces the pending delete. Scenario-2 Pending - // Action: Delete New Request: Create Result: The new create, pending or immediate, - // replaces the pending delete. Scenario-3 Pending Action: Create New Request: - // Delete Result: The new delete, pending or immediate, replaces the pending - // create. Scenario-4 Pending Action: Create New Request: Create Result: The - // new create is added to the pending create. Important:If the new create request - // is Apply Immediately - Yes, all creates are performed immediately. If the - // new create request is Apply Immediately - No, all creates are pending. - // Example: + // Scenario-1 + // + // Pending Action: Delete + // + // New Request: Delete + // + // Result: The new delete, pending or immediate, replaces the pending delete. + // + // Scenario-2 + // + // Pending Action: Delete + // + // New Request: Create + // + // Result: The new create, pending or immediate, replaces the pending delete. + // + // Scenario-3 + // + // Pending Action: Create + // + // New Request: Delete + // + // Result: The new delete, pending or immediate, replaces the pending create. + // + // Scenario-4 + // + // Pending Action: Create + // + // New Request: Create + // + // Result: The new create is added to the pending create. + // + // Important: If the new create request is Apply Immediately - Yes, all creates + // are performed immediately. If the new create request is Apply Immediately + // - No, all creates are pending. + // + // Example: + // + // NewAvailabilityZones.member.1=us-west-2a&NewAvailabilityZones.member.2=us-west-2b&NewAvailabilityZones.member.3=us-west-2c NewAvailabilityZones []*string `locationNameList:"PreferredAvailabilityZone" type:"list"` // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications // will be sent. // - // The Amazon SNS topic owner must be same as the cache cluster owner. + // The Amazon SNS topic owner must be same as the cache cluster owner. NotificationTopicArn *string `type:"string"` // The status of the Amazon SNS notification topic. Notifications are sent only @@ -4150,23 +5430,24 @@ type ModifyCacheClusterInput struct { // For clusters running Redis, this value must be 1. For clusters running Memcached, // this value must be between 1 and 20. // - // Note:Adding or removing Memcached cache nodes can be applied immediately - // or as a pending action. See ApplyImmediately. A pending action to modify - // the number of cache nodes in a cluster during its maintenance window, whether - // by adding or removing nodes in accordance with the scale out architecture, - // is not queued. The customer's latest request to add or remove nodes to the - // cluster overrides any previous pending actions to modify the number of cache - // nodes in the cluster. For example, a request to remove 2 nodes would override - // a previous pending action to remove 3 nodes. Similarly, a request to add - // 2 nodes would override a previous pending action to remove 3 nodes and vice - // versa. As Memcached cache nodes may now be provisioned in different Availability - // Zones with flexible cache node placement, a request to add nodes does not - // automatically override a previous pending action to add nodes. 
The customer - // can modify the previous pending action to add more nodes or explicitly cancel - // the pending request and retry the new request. To cancel pending actions - // to modify the number of cache nodes in a cluster, use the ModifyCacheCluster - // request and set NumCacheNodes equal to the number of cache nodes currently - // in the cache cluster. + // Adding or removing Memcached cache nodes can be applied immediately or + // as a pending action. See ApplyImmediately. + // + // A pending action to modify the number of cache nodes in a cluster during + // its maintenance window, whether by adding or removing nodes in accordance + // with the scale out architecture, is not queued. The customer's latest request + // to add or remove nodes to the cluster overrides any previous pending actions + // to modify the number of cache nodes in the cluster. For example, a request + // to remove 2 nodes would override a previous pending action to remove 3 nodes. + // Similarly, a request to add 2 nodes would override a previous pending action + // to remove 3 nodes and vice versa. As Memcached cache nodes may now be provisioned + // in different Availability Zones with flexible cache node placement, a request + // to add nodes does not automatically override a previous pending action to + // add nodes. The customer can modify the previous pending action to add more + // nodes or explicitly cancel the pending request and retry the new request. + // To cancel pending actions to modify the number of cache nodes in a cluster, + // use the ModifyCacheCluster request and set NumCacheNodes equal to the number + // of cache nodes currently in the cache cluster. NumCacheNodes *int64 `type:"integer"` // Specifies the weekly time range during which maintenance on the cache cluster @@ -4174,7 +5455,21 @@ type ModifyCacheClusterInput struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // Specifies the VPC Security Groups associated with the cache cluster. @@ -4188,8 +5483,8 @@ type ModifyCacheClusterInput struct { // to 5, then a snapshot that was taken today will be retained for 5 days before // being deleted. // - // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups - // are turned off. + // If the value of SnapshotRetentionLimit is set to zero (0), backups are + // turned off. SnapshotRetentionLimit *int64 `type:"integer"` // The daily time range (in UTC) during which ElastiCache will begin taking @@ -4323,7 +5618,9 @@ type ModifyCacheSubnetGroupOutput struct { // Represents the output of one of the following actions: // - // CreateCacheSubnetGroup ModifyCacheSubnetGroup + // CreateCacheSubnetGroup + // + // ModifyCacheSubnetGroup CacheSubnetGroup *CacheSubnetGroup `type:"structure"` } @@ -4363,9 +5660,11 @@ type ModifyReplicationGroupInput struct { // // Valid values: true | false // - // ElastiCache Multi-AZ replication groups are not supported on: + // ElastiCache Multi-AZ replication groups are not supported on: // - // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. 
AutomaticFailoverEnabled *bool `type:"boolean"` // A valid cache node type that you want to scale this replication group to. @@ -4392,7 +5691,7 @@ type ModifyReplicationGroupInput struct { // The upgraded version of the cache engine to be run on the cache clusters // in the replication group. // - // Important: You can upgrade to a newer engine version (see Selecting a Cache + // Important: You can upgrade to a newer engine version (see Selecting a Cache // Engine and Version (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html#VersionManagement)), // but you cannot downgrade to an earlier engine version. If you want to use // an earlier engine version, you must delete the existing replication group @@ -4402,7 +5701,7 @@ type ModifyReplicationGroupInput struct { // The Amazon Resource Name (ARN) of the Amazon SNS topic to which notifications // will be sent. // - // The Amazon SNS topic owner must be same as the replication group owner. + // The Amazon SNS topic owner must be same as the replication group owner. NotificationTopicArn *string `type:"string"` // The status of the Amazon SNS notification topic for the replication group. @@ -4416,7 +5715,21 @@ type ModifyReplicationGroupInput struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // If this parameter is specified, ElastiCache will promote the specified cluster @@ -4442,7 +5755,7 @@ type ModifyReplicationGroupInput struct { // to 5, then a snapshot that was taken today will be retained for 5 days before // being deleted. // - // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // Important If the value of SnapshotRetentionLimit is set to zero (0), backups // are turned off. SnapshotRetentionLimit *int64 `type:"integer"` @@ -4620,6 +5933,12 @@ type Parameter struct { // The valid range of values for the parameter. AllowedValues *string `type:"string"` + // ChangeType indicates whether a change to the parameter will be applied immediately + // or requires a reboot for the change to be applied. You can force a reboot + // or wait until the next maintenance window's reboot. For more information, + // see Rebooting a Cluster (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Rebooting.html). + ChangeType *string `type:"string" enum:"ChangeType"` + // The valid data type for the parameter. DataType *string `type:"string"` @@ -4719,11 +6038,11 @@ type PurchaseReservedCacheNodesOfferingInput struct { // A customer-specified identifier to track this reservation. // - // Note:The Reserved Cache Node ID is an unique customer-specified identifier - // to track this reservation. If this parameter is not specified, ElastiCache - // automatically generates an identifier for the reservation. + // The Reserved Cache Node ID is an unique customer-specified identifier to + // track this reservation. If this parameter is not specified, ElastiCache automatically + // generates an identifier for the reservation. // - // Example: myreservationID + // Example: myreservationID ReservedCacheNodeId *string `type:"string"` // The ID of the reserved cache node offering to purchase. 
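The new ChangeType field on Parameter (with the ChangeType enum values added later in this patch) tells callers whether a parameter modification takes effect immediately or only after a reboot. A minimal sketch of inspecting it follows; the parameter group name and region are illustrative assumptions.

package main

import (
    "fmt"
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
    svc := elasticache.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

    out, err := svc.DescribeCacheParameters(&elasticache.DescribeCacheParametersInput{
        CacheParameterGroupName: aws.String("my-param-group"), // hypothetical group name
    })
    if err != nil {
        log.Fatal(err)
    }

    // Report which parameters need a reboot to take effect, based on ChangeType.
    for _, p := range out.Parameters {
        if aws.StringValue(p.ChangeType) == elasticache.ChangeTypeRequiresReboot {
            fmt.Printf("%s requires a reboot\n", aws.StringValue(p.ParameterName))
        } else {
            fmt.Printf("%s is applied immediately\n", aws.StringValue(p.ParameterName))
        }
    }
}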
@@ -4900,9 +6219,11 @@ type ReplicationGroup struct { // Indicates the status of Multi-AZ for this replication group. // - // ElastiCache Multi-AZ replication groups are not supported on: + // ElastiCache Multi-AZ replication groups are not supported on: // - // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. AutomaticFailover *string `type:"string" enum:"AutomaticFailoverStatus"` // The description of the replication group. @@ -4947,9 +6268,11 @@ type ReplicationGroupPendingModifiedValues struct { // Indicates the status of Multi-AZ for this replication group. // - // ElastiCache Multi-AZ replication groups are not supported on: + // ElastiCache Multi-AZ replication groups are not supported on: // - // Redis versions earlier than 2.8.6. T1 and T2 cache node types. + // Redis versions earlier than 2.8.6. + // + // T1 and T2 cache node types. AutomaticFailoverStatus *string `type:"string" enum:"PendingAutomaticFailoverStatus"` // The primary cluster ID which will be applied immediately (if --apply-immediately @@ -4978,19 +6301,34 @@ type ReservedCacheNode struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). 
CacheNodeType *string `type:"string"` @@ -5044,19 +6382,34 @@ type ReservedCacheNodesOffering struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -5100,12 +6453,13 @@ type ResetCacheParameterGroupInput struct { // The name of the cache parameter group to reset. CacheParameterGroupName *string `type:"string" required:"true"` - // An array of parameter names to be reset. If you are not resetting the entire - // cache parameter group, you must specify at least one parameter name. - ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list" required:"true"` + // An array of parameter names to reset to their default values. If ResetAllParameters + // is false, you must specify the name of at least one parameter to reset. + ParameterNameValues []*ParameterNameValue `locationNameList:"ParameterNameValue" type:"list"` - // If true, all parameters in the cache parameter group will be reset to default - // values. If false, no such action occurs. + // If true, all parameters in the cache parameter group will be reset to their + // default values. If false, only the parameters listed by ParameterNameValues + // are reset to their default values. 
// // Valid values: true | false ResetAllParameters *bool `type:"boolean"` @@ -5127,9 +6481,6 @@ func (s *ResetCacheParameterGroupInput) Validate() error { if s.CacheParameterGroupName == nil { invalidParams.Add(request.NewErrParamRequired("CacheParameterGroupName")) } - if s.ParameterNameValues == nil { - invalidParams.Add(request.NewErrParamRequired("ParameterNameValues")) - } if invalidParams.Len() > 0 { return invalidParams @@ -5187,7 +6538,11 @@ type RevokeCacheSecurityGroupIngressOutput struct { // Represents the output of one of the following actions: // - // AuthorizeCacheSecurityGroupIngress CreateCacheSecurityGroup RevokeCacheSecurityGroupIngress + // AuthorizeCacheSecurityGroupIngress + // + // CreateCacheSecurityGroup + // + // RevokeCacheSecurityGroupIngress CacheSecurityGroup *CacheSecurityGroup `type:"structure"` } @@ -5243,19 +6598,34 @@ type Snapshot struct { // // Valid node types are as follows: // - // General purpose: Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, - // cache.m3.medium, cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge Previous - // generation: cache.t1.micro, cache.m1.small, cache.m1.medium, cache.m1.large, - // cache.m1.xlarge Compute optimized: cache.c1.xlarge Memory optimized Current - // generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, cache.r3.4xlarge, - // cache.r3.8xlarge Previous generation: cache.m2.xlarge, cache.m2.2xlarge, - // cache.m2.4xlarge Notes: + // General purpose: // - // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). - // Redis backup/restore is not supported for t2 instances. Redis Append-only - // files (AOF) functionality is not supported for t1 or t2 instances. For a - // complete listing of cache node types and specifications, see Amazon ElastiCache - // Product Features and Details (http://aws.amazon.com/elasticache/details) + // Current generation: cache.t2.micro, cache.t2.small, cache.t2.medium, cache.m3.medium, + // cache.m3.large, cache.m3.xlarge, cache.m3.2xlarge + // + // Previous generation: cache.t1.micro, cache.m1.small, cache.m1.medium, + // cache.m1.large, cache.m1.xlarge + // + // Compute optimized: cache.c1.xlarge + // + // Memory optimized: + // + // Current generation: cache.r3.large, cache.r3.xlarge, cache.r3.2xlarge, + // cache.r3.4xlarge, cache.r3.8xlarge + // + // Previous generation: cache.m2.xlarge, cache.m2.2xlarge, cache.m2.4xlarge + // + // Notes: + // + // All t2 instances are created in an Amazon Virtual Private Cloud (VPC). + // + // Redis backup/restore is not supported for t2 instances. + // + // Redis Append-only files (AOF) functionality is not supported for t1 or + // t2 instances. + // + // For a complete listing of cache node types and specifications, see Amazon + // ElastiCache Product Features and Details (http://aws.amazon.com/elasticache/details) // and Cache Node Type-Specific Parameters for Memcached (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Memcached.html#CacheParameterGroups.Memcached.NodeSpecific) // or Cache Node Type-Specific Parameters for Redis (http://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/CacheParameterGroups.Redis.html#CacheParameterGroups.Redis.NodeSpecific). CacheNodeType *string `type:"string"` @@ -5294,7 +6664,21 @@ type Snapshot struct { // (24H Clock UTC). The minimum maintenance window is a 60 minute period. 
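Tying together the ResetCacheParameterGroup changes above (ParameterNameValues is no longer a required field and the corresponding Validate check was removed), here is a minimal sketch of both reset modes; the client setup, group name, and parameter name are illustrative assumptions.

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/elasticache"
)

func main() {
    svc := elasticache.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

    // Reset every parameter in the group; ParameterNameValues may now be omitted.
    if _, err := svc.ResetCacheParameterGroup(&elasticache.ResetCacheParameterGroupInput{
        CacheParameterGroupName: aws.String("my-param-group"),
        ResetAllParameters:      aws.Bool(true),
    }); err != nil {
        log.Fatal(err)
    }

    // Or reset only the named parameters (at least one name is still needed
    // when ResetAllParameters is false).
    if _, err := svc.ResetCacheParameterGroup(&elasticache.ResetCacheParameterGroupInput{
        CacheParameterGroupName: aws.String("my-param-group"),
        ResetAllParameters:      aws.Bool(false),
        ParameterNameValues: []*elasticache.ParameterNameValue{
            {ParameterName: aws.String("maxmemory-policy")},
        },
    }); err != nil {
        log.Fatal(err)
    }
}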
Valid // values for ddd are: // - // sun mon tue wed thu fri sat Example: sun:05:00-sun:09:00 + // sun + // + // mon + // + // tue + // + // wed + // + // thu + // + // fri + // + // sat + // + // Example: sun:05:00-sun:09:00 PreferredMaintenanceWindow *string `type:"string"` // The name of a snapshot. For an automatic snapshot, the name is system-generated; @@ -5309,7 +6693,7 @@ type Snapshot struct { // ignored: Manual snapshots do not expire, and can only be deleted using the // DeleteSnapshot action. // - // ImportantIf the value of SnapshotRetentionLimit is set to zero (0), backups + // Important If the value of SnapshotRetentionLimit is set to zero (0), backups // are turned off. SnapshotRetentionLimit *int64 `type:"integer"` @@ -5427,6 +6811,13 @@ const ( AutomaticFailoverStatusDisabling = "disabling" ) +const ( + // @enum ChangeType + ChangeTypeImmediate = "immediate" + // @enum ChangeType + ChangeTypeRequiresReboot = "requires-reboot" +) + const ( // @enum PendingAutomaticFailoverStatus PendingAutomaticFailoverStatusEnabled = "enabled" diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go index 8e4cbb460..c13234d73 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticache/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon ElastiCache is a web service that makes it easier to set up, operate, @@ -68,7 +68,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go index 760a82b49..4f2deb7c2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/api.go @@ -15,7 +15,28 @@ import ( const opAbortEnvironmentUpdate = "AbortEnvironmentUpdate" -// AbortEnvironmentUpdateRequest generates a request for the AbortEnvironmentUpdate operation. +// AbortEnvironmentUpdateRequest generates a "aws/request.Request" representing the +// client's request for the AbortEnvironmentUpdate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortEnvironmentUpdate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortEnvironmentUpdateRequest method. 
+// req, resp := client.AbortEnvironmentUpdateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) AbortEnvironmentUpdateRequest(input *AbortEnvironmentUpdateInput) (req *request.Request, output *AbortEnvironmentUpdateOutput) { op := &request.Operation{ Name: opAbortEnvironmentUpdate, @@ -45,7 +66,28 @@ func (c *ElasticBeanstalk) AbortEnvironmentUpdate(input *AbortEnvironmentUpdateI const opApplyEnvironmentManagedAction = "ApplyEnvironmentManagedAction" -// ApplyEnvironmentManagedActionRequest generates a request for the ApplyEnvironmentManagedAction operation. +// ApplyEnvironmentManagedActionRequest generates a "aws/request.Request" representing the +// client's request for the ApplyEnvironmentManagedAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplyEnvironmentManagedAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplyEnvironmentManagedActionRequest method. +// req, resp := client.ApplyEnvironmentManagedActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) ApplyEnvironmentManagedActionRequest(input *ApplyEnvironmentManagedActionInput) (req *request.Request, output *ApplyEnvironmentManagedActionOutput) { op := &request.Operation{ Name: opApplyEnvironmentManagedAction, @@ -74,7 +116,28 @@ func (c *ElasticBeanstalk) ApplyEnvironmentManagedAction(input *ApplyEnvironment const opCheckDNSAvailability = "CheckDNSAvailability" -// CheckDNSAvailabilityRequest generates a request for the CheckDNSAvailability operation. +// CheckDNSAvailabilityRequest generates a "aws/request.Request" representing the +// client's request for the CheckDNSAvailability operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CheckDNSAvailability method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CheckDNSAvailabilityRequest method. 
+// req, resp := client.CheckDNSAvailabilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CheckDNSAvailabilityRequest(input *CheckDNSAvailabilityInput) (req *request.Request, output *CheckDNSAvailabilityOutput) { op := &request.Operation{ Name: opCheckDNSAvailability, @@ -101,7 +164,28 @@ func (c *ElasticBeanstalk) CheckDNSAvailability(input *CheckDNSAvailabilityInput const opComposeEnvironments = "ComposeEnvironments" -// ComposeEnvironmentsRequest generates a request for the ComposeEnvironments operation. +// ComposeEnvironmentsRequest generates a "aws/request.Request" representing the +// client's request for the ComposeEnvironments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ComposeEnvironments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ComposeEnvironmentsRequest method. +// req, resp := client.ComposeEnvironmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) ComposeEnvironmentsRequest(input *ComposeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) { op := &request.Operation{ Name: opComposeEnvironments, @@ -134,7 +218,28 @@ func (c *ElasticBeanstalk) ComposeEnvironments(input *ComposeEnvironmentsInput) const opCreateApplication = "CreateApplication" -// CreateApplicationRequest generates a request for the CreateApplication operation. +// CreateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApplicationRequest method. +// req, resp := client.CreateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CreateApplicationRequest(input *CreateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) { op := &request.Operation{ Name: opCreateApplication, @@ -162,7 +267,28 @@ func (c *ElasticBeanstalk) CreateApplication(input *CreateApplicationInput) (*Ap const opCreateApplicationVersion = "CreateApplicationVersion" -// CreateApplicationVersionRequest generates a request for the CreateApplicationVersion operation. 
+// CreateApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateApplicationVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateApplicationVersionRequest method. +// req, resp := client.CreateApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CreateApplicationVersionRequest(input *CreateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { op := &request.Operation{ Name: opCreateApplicationVersion, @@ -194,7 +320,28 @@ func (c *ElasticBeanstalk) CreateApplicationVersion(input *CreateApplicationVers const opCreateConfigurationTemplate = "CreateConfigurationTemplate" -// CreateConfigurationTemplateRequest generates a request for the CreateConfigurationTemplate operation. +// CreateConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the CreateConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateConfigurationTemplateRequest method. +// req, resp := client.CreateConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CreateConfigurationTemplateRequest(input *CreateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { op := &request.Operation{ Name: opCreateConfigurationTemplate, @@ -227,7 +374,28 @@ func (c *ElasticBeanstalk) CreateConfigurationTemplate(input *CreateConfiguratio const opCreateEnvironment = "CreateEnvironment" -// CreateEnvironmentRequest generates a request for the CreateEnvironment operation. +// CreateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the CreateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEnvironmentRequest method. +// req, resp := client.CreateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CreateEnvironmentRequest(input *CreateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { op := &request.Operation{ Name: opCreateEnvironment, @@ -255,7 +423,28 @@ func (c *ElasticBeanstalk) CreateEnvironment(input *CreateEnvironmentInput) (*En const opCreateStorageLocation = "CreateStorageLocation" -// CreateStorageLocationRequest generates a request for the CreateStorageLocation operation. +// CreateStorageLocationRequest generates a "aws/request.Request" representing the +// client's request for the CreateStorageLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStorageLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStorageLocationRequest method. +// req, resp := client.CreateStorageLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) CreateStorageLocationRequest(input *CreateStorageLocationInput) (req *request.Request, output *CreateStorageLocationOutput) { op := &request.Operation{ Name: opCreateStorageLocation, @@ -284,7 +473,28 @@ func (c *ElasticBeanstalk) CreateStorageLocation(input *CreateStorageLocationInp const opDeleteApplication = "DeleteApplication" -// DeleteApplicationRequest generates a request for the DeleteApplication operation. +// DeleteApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationRequest method. 
+// req, resp := client.DeleteApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DeleteApplicationRequest(input *DeleteApplicationInput) (req *request.Request, output *DeleteApplicationOutput) { op := &request.Operation{ Name: opDeleteApplication, @@ -317,7 +527,28 @@ func (c *ElasticBeanstalk) DeleteApplication(input *DeleteApplicationInput) (*De const opDeleteApplicationVersion = "DeleteApplicationVersion" -// DeleteApplicationVersionRequest generates a request for the DeleteApplicationVersion operation. +// DeleteApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApplicationVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteApplicationVersionRequest method. +// req, resp := client.DeleteApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DeleteApplicationVersionRequest(input *DeleteApplicationVersionInput) (req *request.Request, output *DeleteApplicationVersionOutput) { op := &request.Operation{ Name: opDeleteApplicationVersion, @@ -349,7 +580,28 @@ func (c *ElasticBeanstalk) DeleteApplicationVersion(input *DeleteApplicationVers const opDeleteConfigurationTemplate = "DeleteConfigurationTemplate" -// DeleteConfigurationTemplateRequest generates a request for the DeleteConfigurationTemplate operation. +// DeleteConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteConfigurationTemplateRequest method. 
+// req, resp := client.DeleteConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DeleteConfigurationTemplateRequest(input *DeleteConfigurationTemplateInput) (req *request.Request, output *DeleteConfigurationTemplateOutput) { op := &request.Operation{ Name: opDeleteConfigurationTemplate, @@ -382,7 +634,28 @@ func (c *ElasticBeanstalk) DeleteConfigurationTemplate(input *DeleteConfiguratio const opDeleteEnvironmentConfiguration = "DeleteEnvironmentConfiguration" -// DeleteEnvironmentConfigurationRequest generates a request for the DeleteEnvironmentConfiguration operation. +// DeleteEnvironmentConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEnvironmentConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEnvironmentConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEnvironmentConfigurationRequest method. +// req, resp := client.DeleteEnvironmentConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DeleteEnvironmentConfigurationRequest(input *DeleteEnvironmentConfigurationInput) (req *request.Request, output *DeleteEnvironmentConfigurationOutput) { op := &request.Operation{ Name: opDeleteEnvironmentConfiguration, @@ -418,7 +691,28 @@ func (c *ElasticBeanstalk) DeleteEnvironmentConfiguration(input *DeleteEnvironme const opDescribeApplicationVersions = "DescribeApplicationVersions" -// DescribeApplicationVersionsRequest generates a request for the DescribeApplicationVersions operation. +// DescribeApplicationVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplicationVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApplicationVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeApplicationVersionsRequest method. 
+// req, resp := client.DescribeApplicationVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeApplicationVersionsRequest(input *DescribeApplicationVersionsInput) (req *request.Request, output *DescribeApplicationVersionsOutput) { op := &request.Operation{ Name: opDescribeApplicationVersions, @@ -446,7 +740,28 @@ func (c *ElasticBeanstalk) DescribeApplicationVersions(input *DescribeApplicatio const opDescribeApplications = "DescribeApplications" -// DescribeApplicationsRequest generates a request for the DescribeApplications operation. +// DescribeApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeApplicationsRequest method. +// req, resp := client.DescribeApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeApplicationsRequest(input *DescribeApplicationsInput) (req *request.Request, output *DescribeApplicationsOutput) { op := &request.Operation{ Name: opDescribeApplications, @@ -473,7 +788,28 @@ func (c *ElasticBeanstalk) DescribeApplications(input *DescribeApplicationsInput const opDescribeConfigurationOptions = "DescribeConfigurationOptions" -// DescribeConfigurationOptionsRequest generates a request for the DescribeConfigurationOptions operation. +// DescribeConfigurationOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurationOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConfigurationOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConfigurationOptionsRequest method. 
+// req, resp := client.DescribeConfigurationOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeConfigurationOptionsRequest(input *DescribeConfigurationOptionsInput) (req *request.Request, output *DescribeConfigurationOptionsOutput) { op := &request.Operation{ Name: opDescribeConfigurationOptions, @@ -504,7 +840,28 @@ func (c *ElasticBeanstalk) DescribeConfigurationOptions(input *DescribeConfigura const opDescribeConfigurationSettings = "DescribeConfigurationSettings" -// DescribeConfigurationSettingsRequest generates a request for the DescribeConfigurationSettings operation. +// DescribeConfigurationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeConfigurationSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeConfigurationSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeConfigurationSettingsRequest method. +// req, resp := client.DescribeConfigurationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeConfigurationSettingsRequest(input *DescribeConfigurationSettingsInput) (req *request.Request, output *DescribeConfigurationSettingsOutput) { op := &request.Operation{ Name: opDescribeConfigurationSettings, @@ -543,7 +900,28 @@ func (c *ElasticBeanstalk) DescribeConfigurationSettings(input *DescribeConfigur const opDescribeEnvironmentHealth = "DescribeEnvironmentHealth" -// DescribeEnvironmentHealthRequest generates a request for the DescribeEnvironmentHealth operation. +// DescribeEnvironmentHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentHealthRequest method. 
+// req, resp := client.DescribeEnvironmentHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEnvironmentHealthRequest(input *DescribeEnvironmentHealthInput) (req *request.Request, output *DescribeEnvironmentHealthOutput) { op := &request.Operation{ Name: opDescribeEnvironmentHealth, @@ -572,7 +950,28 @@ func (c *ElasticBeanstalk) DescribeEnvironmentHealth(input *DescribeEnvironmentH const opDescribeEnvironmentManagedActionHistory = "DescribeEnvironmentManagedActionHistory" -// DescribeEnvironmentManagedActionHistoryRequest generates a request for the DescribeEnvironmentManagedActionHistory operation. +// DescribeEnvironmentManagedActionHistoryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentManagedActionHistory operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentManagedActionHistory method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentManagedActionHistoryRequest method. +// req, resp := client.DescribeEnvironmentManagedActionHistoryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistoryRequest(input *DescribeEnvironmentManagedActionHistoryInput) (req *request.Request, output *DescribeEnvironmentManagedActionHistoryOutput) { op := &request.Operation{ Name: opDescribeEnvironmentManagedActionHistory, @@ -599,7 +998,28 @@ func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionHistory(input *Descri const opDescribeEnvironmentManagedActions = "DescribeEnvironmentManagedActions" -// DescribeEnvironmentManagedActionsRequest generates a request for the DescribeEnvironmentManagedActions operation. +// DescribeEnvironmentManagedActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentManagedActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentManagedActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentManagedActionsRequest method. 
+// req, resp := client.DescribeEnvironmentManagedActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEnvironmentManagedActionsRequest(input *DescribeEnvironmentManagedActionsInput) (req *request.Request, output *DescribeEnvironmentManagedActionsOutput) { op := &request.Operation{ Name: opDescribeEnvironmentManagedActions, @@ -626,7 +1046,28 @@ func (c *ElasticBeanstalk) DescribeEnvironmentManagedActions(input *DescribeEnvi const opDescribeEnvironmentResources = "DescribeEnvironmentResources" -// DescribeEnvironmentResourcesRequest generates a request for the DescribeEnvironmentResources operation. +// DescribeEnvironmentResourcesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironmentResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironmentResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentResourcesRequest method. +// req, resp := client.DescribeEnvironmentResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEnvironmentResourcesRequest(input *DescribeEnvironmentResourcesInput) (req *request.Request, output *DescribeEnvironmentResourcesOutput) { op := &request.Operation{ Name: opDescribeEnvironmentResources, @@ -653,7 +1094,28 @@ func (c *ElasticBeanstalk) DescribeEnvironmentResources(input *DescribeEnvironme const opDescribeEnvironments = "DescribeEnvironments" -// DescribeEnvironmentsRequest generates a request for the DescribeEnvironments operation. +// DescribeEnvironmentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEnvironments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEnvironments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEnvironmentsRequest method. 
+// req, resp := client.DescribeEnvironmentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEnvironmentsRequest(input *DescribeEnvironmentsInput) (req *request.Request, output *EnvironmentDescriptionsMessage) { op := &request.Operation{ Name: opDescribeEnvironments, @@ -680,7 +1142,28 @@ func (c *ElasticBeanstalk) DescribeEnvironments(input *DescribeEnvironmentsInput const opDescribeEvents = "DescribeEvents" -// DescribeEventsRequest generates a request for the DescribeEvents operation. +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { op := &request.Operation{ Name: opDescribeEvents, @@ -713,6 +1196,23 @@ func (c *ElasticBeanstalk) DescribeEvents(input *DescribeEventsInput) (*Describe return out, err } +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. +// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElasticBeanstalk) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -723,7 +1223,28 @@ func (c *ElasticBeanstalk) DescribeEventsPages(input *DescribeEventsInput, fn fu const opDescribeInstancesHealth = "DescribeInstancesHealth" -// DescribeInstancesHealthRequest generates a request for the DescribeInstancesHealth operation. +// DescribeInstancesHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstancesHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeInstancesHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstancesHealthRequest method. +// req, resp := client.DescribeInstancesHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) DescribeInstancesHealthRequest(input *DescribeInstancesHealthInput) (req *request.Request, output *DescribeInstancesHealthOutput) { op := &request.Operation{ Name: opDescribeInstancesHealth, @@ -752,7 +1273,28 @@ func (c *ElasticBeanstalk) DescribeInstancesHealth(input *DescribeInstancesHealt const opListAvailableSolutionStacks = "ListAvailableSolutionStacks" -// ListAvailableSolutionStacksRequest generates a request for the ListAvailableSolutionStacks operation. +// ListAvailableSolutionStacksRequest generates a "aws/request.Request" representing the +// client's request for the ListAvailableSolutionStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAvailableSolutionStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAvailableSolutionStacksRequest method. +// req, resp := client.ListAvailableSolutionStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) ListAvailableSolutionStacksRequest(input *ListAvailableSolutionStacksInput) (req *request.Request, output *ListAvailableSolutionStacksOutput) { op := &request.Operation{ Name: opListAvailableSolutionStacks, @@ -779,7 +1321,28 @@ func (c *ElasticBeanstalk) ListAvailableSolutionStacks(input *ListAvailableSolut const opRebuildEnvironment = "RebuildEnvironment" -// RebuildEnvironmentRequest generates a request for the RebuildEnvironment operation. +// RebuildEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the RebuildEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebuildEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebuildEnvironmentRequest method. 
+// req, resp := client.RebuildEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) RebuildEnvironmentRequest(input *RebuildEnvironmentInput) (req *request.Request, output *RebuildEnvironmentOutput) { op := &request.Operation{ Name: opRebuildEnvironment, @@ -809,7 +1372,28 @@ func (c *ElasticBeanstalk) RebuildEnvironment(input *RebuildEnvironmentInput) (* const opRequestEnvironmentInfo = "RequestEnvironmentInfo" -// RequestEnvironmentInfoRequest generates a request for the RequestEnvironmentInfo operation. +// RequestEnvironmentInfoRequest generates a "aws/request.Request" representing the +// client's request for the RequestEnvironmentInfo operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RequestEnvironmentInfo method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RequestEnvironmentInfoRequest method. +// req, resp := client.RequestEnvironmentInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) RequestEnvironmentInfoRequest(input *RequestEnvironmentInfoInput) (req *request.Request, output *RequestEnvironmentInfoOutput) { op := &request.Operation{ Name: opRequestEnvironmentInfo, @@ -852,7 +1436,28 @@ func (c *ElasticBeanstalk) RequestEnvironmentInfo(input *RequestEnvironmentInfoI const opRestartAppServer = "RestartAppServer" -// RestartAppServerRequest generates a request for the RestartAppServer operation. +// RestartAppServerRequest generates a "aws/request.Request" representing the +// client's request for the RestartAppServer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestartAppServer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestartAppServerRequest method. +// req, resp := client.RestartAppServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) RestartAppServerRequest(input *RestartAppServerInput) (req *request.Request, output *RestartAppServerOutput) { op := &request.Operation{ Name: opRestartAppServer, @@ -882,7 +1487,28 @@ func (c *ElasticBeanstalk) RestartAppServer(input *RestartAppServerInput) (*Rest const opRetrieveEnvironmentInfo = "RetrieveEnvironmentInfo" -// RetrieveEnvironmentInfoRequest generates a request for the RetrieveEnvironmentInfo operation. 
+// RetrieveEnvironmentInfoRequest generates a "aws/request.Request" representing the +// client's request for the RetrieveEnvironmentInfo operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetrieveEnvironmentInfo method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetrieveEnvironmentInfoRequest method. +// req, resp := client.RetrieveEnvironmentInfoRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) RetrieveEnvironmentInfoRequest(input *RetrieveEnvironmentInfoInput) (req *request.Request, output *RetrieveEnvironmentInfoOutput) { op := &request.Operation{ Name: opRetrieveEnvironmentInfo, @@ -913,7 +1539,28 @@ func (c *ElasticBeanstalk) RetrieveEnvironmentInfo(input *RetrieveEnvironmentInf const opSwapEnvironmentCNAMEs = "SwapEnvironmentCNAMEs" -// SwapEnvironmentCNAMEsRequest generates a request for the SwapEnvironmentCNAMEs operation. +// SwapEnvironmentCNAMEsRequest generates a "aws/request.Request" representing the +// client's request for the SwapEnvironmentCNAMEs operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SwapEnvironmentCNAMEs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SwapEnvironmentCNAMEsRequest method. +// req, resp := client.SwapEnvironmentCNAMEsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) SwapEnvironmentCNAMEsRequest(input *SwapEnvironmentCNAMEsInput) (req *request.Request, output *SwapEnvironmentCNAMEsOutput) { op := &request.Operation{ Name: opSwapEnvironmentCNAMEs, @@ -942,7 +1589,28 @@ func (c *ElasticBeanstalk) SwapEnvironmentCNAMEs(input *SwapEnvironmentCNAMEsInp const opTerminateEnvironment = "TerminateEnvironment" -// TerminateEnvironmentRequest generates a request for the TerminateEnvironment operation. +// TerminateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the TerminateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateEnvironment method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateEnvironmentRequest method. +// req, resp := client.TerminateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) TerminateEnvironmentRequest(input *TerminateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { op := &request.Operation{ Name: opTerminateEnvironment, @@ -969,7 +1637,28 @@ func (c *ElasticBeanstalk) TerminateEnvironment(input *TerminateEnvironmentInput const opUpdateApplication = "UpdateApplication" -// UpdateApplicationRequest generates a request for the UpdateApplication operation. +// UpdateApplicationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationRequest method. +// req, resp := client.UpdateApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) UpdateApplicationRequest(input *UpdateApplicationInput) (req *request.Request, output *ApplicationDescriptionMessage) { op := &request.Operation{ Name: opUpdateApplication, @@ -999,7 +1688,28 @@ func (c *ElasticBeanstalk) UpdateApplication(input *UpdateApplicationInput) (*Ap const opUpdateApplicationVersion = "UpdateApplicationVersion" -// UpdateApplicationVersionRequest generates a request for the UpdateApplicationVersion operation. +// UpdateApplicationVersionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApplicationVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApplicationVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateApplicationVersionRequest method. 
+// req, resp := client.UpdateApplicationVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) UpdateApplicationVersionRequest(input *UpdateApplicationVersionInput) (req *request.Request, output *ApplicationVersionDescriptionMessage) { op := &request.Operation{ Name: opUpdateApplicationVersion, @@ -1029,7 +1739,28 @@ func (c *ElasticBeanstalk) UpdateApplicationVersion(input *UpdateApplicationVers const opUpdateConfigurationTemplate = "UpdateConfigurationTemplate" -// UpdateConfigurationTemplateRequest generates a request for the UpdateConfigurationTemplate operation. +// UpdateConfigurationTemplateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateConfigurationTemplate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateConfigurationTemplate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateConfigurationTemplateRequest method. +// req, resp := client.UpdateConfigurationTemplateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) UpdateConfigurationTemplateRequest(input *UpdateConfigurationTemplateInput) (req *request.Request, output *ConfigurationSettingsDescription) { op := &request.Operation{ Name: opUpdateConfigurationTemplate, @@ -1063,7 +1794,28 @@ func (c *ElasticBeanstalk) UpdateConfigurationTemplate(input *UpdateConfiguratio const opUpdateEnvironment = "UpdateEnvironment" -// UpdateEnvironmentRequest generates a request for the UpdateEnvironment operation. +// UpdateEnvironmentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEnvironment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateEnvironment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateEnvironmentRequest method. 
+// req, resp := client.UpdateEnvironmentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) UpdateEnvironmentRequest(input *UpdateEnvironmentInput) (req *request.Request, output *EnvironmentDescription) { op := &request.Operation{ Name: opUpdateEnvironment, @@ -1100,7 +1852,28 @@ func (c *ElasticBeanstalk) UpdateEnvironment(input *UpdateEnvironmentInput) (*En const opValidateConfigurationSettings = "ValidateConfigurationSettings" -// ValidateConfigurationSettingsRequest generates a request for the ValidateConfigurationSettings operation. +// ValidateConfigurationSettingsRequest generates a "aws/request.Request" representing the +// client's request for the ValidateConfigurationSettings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ValidateConfigurationSettings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ValidateConfigurationSettingsRequest method. +// req, resp := client.ValidateConfigurationSettingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticBeanstalk) ValidateConfigurationSettingsRequest(input *ValidateConfigurationSettingsInput) (req *request.Request, output *ValidateConfigurationSettingsOutput) { op := &request.Operation{ Name: opValidateConfigurationSettings, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go index 5e6d4236d..795a6fef1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticbeanstalk/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // AWS Elastic Beanstalk makes it easy for you to create, deploy, and manage @@ -74,7 +74,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go index 756a84d6f..df36f509f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go @@ -15,7 +15,28 @@ import ( const opAddTags = "AddTags" -// AddTagsRequest generates a request for the AddTags operation. 
+// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { op := &request.Operation{ Name: opAddTags, @@ -47,7 +68,28 @@ func (c *ElasticsearchService) AddTags(input *AddTagsInput) (*AddTagsOutput, err const opCreateElasticsearchDomain = "CreateElasticsearchDomain" -// CreateElasticsearchDomainRequest generates a request for the CreateElasticsearchDomain operation. +// CreateElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the CreateElasticsearchDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateElasticsearchDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateElasticsearchDomainRequest method. +// req, resp := client.CreateElasticsearchDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) CreateElasticsearchDomainRequest(input *CreateElasticsearchDomainInput) (req *request.Request, output *CreateElasticsearchDomainOutput) { op := &request.Operation{ Name: opCreateElasticsearchDomain, @@ -76,7 +118,28 @@ func (c *ElasticsearchService) CreateElasticsearchDomain(input *CreateElasticsea const opDeleteElasticsearchDomain = "DeleteElasticsearchDomain" -// DeleteElasticsearchDomainRequest generates a request for the DeleteElasticsearchDomain operation. +// DeleteElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the DeleteElasticsearchDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteElasticsearchDomain method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteElasticsearchDomainRequest method. +// req, resp := client.DeleteElasticsearchDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) DeleteElasticsearchDomainRequest(input *DeleteElasticsearchDomainInput) (req *request.Request, output *DeleteElasticsearchDomainOutput) { op := &request.Operation{ Name: opDeleteElasticsearchDomain, @@ -104,7 +167,28 @@ func (c *ElasticsearchService) DeleteElasticsearchDomain(input *DeleteElasticsea const opDescribeElasticsearchDomain = "DescribeElasticsearchDomain" -// DescribeElasticsearchDomainRequest generates a request for the DescribeElasticsearchDomain operation. +// DescribeElasticsearchDomainRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainRequest method. +// req, resp := client.DescribeElasticsearchDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) DescribeElasticsearchDomainRequest(input *DescribeElasticsearchDomainInput) (req *request.Request, output *DescribeElasticsearchDomainOutput) { op := &request.Operation{ Name: opDescribeElasticsearchDomain, @@ -132,7 +216,28 @@ func (c *ElasticsearchService) DescribeElasticsearchDomain(input *DescribeElasti const opDescribeElasticsearchDomainConfig = "DescribeElasticsearchDomainConfig" -// DescribeElasticsearchDomainConfigRequest generates a request for the DescribeElasticsearchDomainConfig operation. +// DescribeElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomainConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomainConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainConfigRequest method. 
+// req, resp := client.DescribeElasticsearchDomainConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) DescribeElasticsearchDomainConfigRequest(input *DescribeElasticsearchDomainConfigInput) (req *request.Request, output *DescribeElasticsearchDomainConfigOutput) { op := &request.Operation{ Name: opDescribeElasticsearchDomainConfig, @@ -161,7 +266,28 @@ func (c *ElasticsearchService) DescribeElasticsearchDomainConfig(input *Describe const opDescribeElasticsearchDomains = "DescribeElasticsearchDomains" -// DescribeElasticsearchDomainsRequest generates a request for the DescribeElasticsearchDomains operation. +// DescribeElasticsearchDomainsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticsearchDomains operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticsearchDomains method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticsearchDomainsRequest method. +// req, resp := client.DescribeElasticsearchDomainsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) DescribeElasticsearchDomainsRequest(input *DescribeElasticsearchDomainsInput) (req *request.Request, output *DescribeElasticsearchDomainsOutput) { op := &request.Operation{ Name: opDescribeElasticsearchDomains, @@ -189,7 +315,28 @@ func (c *ElasticsearchService) DescribeElasticsearchDomains(input *DescribeElast const opListDomainNames = "ListDomainNames" -// ListDomainNamesRequest generates a request for the ListDomainNames operation. +// ListDomainNamesRequest generates a "aws/request.Request" representing the +// client's request for the ListDomainNames operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDomainNames method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDomainNamesRequest method. +// req, resp := client.ListDomainNamesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) ListDomainNamesRequest(input *ListDomainNamesInput) (req *request.Request, output *ListDomainNamesOutput) { op := &request.Operation{ Name: opListDomainNames, @@ -217,7 +364,28 @@ func (c *ElasticsearchService) ListDomainNames(input *ListDomainNamesInput) (*Li const opListTags = "ListTags" -// ListTagsRequest generates a request for the ListTags operation. 
+// ListTagsRequest generates a "aws/request.Request" representing the +// client's request for the ListTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsRequest method. +// req, resp := client.ListTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) ListTagsRequest(input *ListTagsInput) (req *request.Request, output *ListTagsOutput) { op := &request.Operation{ Name: opListTags, @@ -244,7 +412,28 @@ func (c *ElasticsearchService) ListTags(input *ListTagsInput) (*ListTagsOutput, const opRemoveTags = "RemoveTags" -// RemoveTagsRequest generates a request for the RemoveTags operation. +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { op := &request.Operation{ Name: opRemoveTags, @@ -273,7 +462,28 @@ func (c *ElasticsearchService) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOu const opUpdateElasticsearchDomainConfig = "UpdateElasticsearchDomainConfig" -// UpdateElasticsearchDomainConfigRequest generates a request for the UpdateElasticsearchDomainConfig operation. +// UpdateElasticsearchDomainConfigRequest generates a "aws/request.Request" representing the +// client's request for the UpdateElasticsearchDomainConfig operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateElasticsearchDomainConfig method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateElasticsearchDomainConfigRequest method. 
+// req, resp := client.UpdateElasticsearchDomainConfigRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticsearchService) UpdateElasticsearchDomainConfigRequest(input *UpdateElasticsearchDomainConfigInput) (req *request.Request, output *UpdateElasticsearchDomainConfigOutput) { op := &request.Operation{ Name: opUpdateElasticsearchDomainConfig, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index 1617a90df..60f3971a9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Use the Amazon Elasticsearch configuration API to create, configure, and @@ -64,7 +64,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go index 1c03defda..af931b4f1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go @@ -12,7 +12,28 @@ import ( const opCancelJob = "CancelJob" -// CancelJobRequest generates a request for the CancelJob operation. +// CancelJobRequest generates a "aws/request.Request" representing the +// client's request for the CancelJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelJobRequest method. +// req, resp := client.CancelJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) CancelJobRequest(input *CancelJobInput) (req *request.Request, output *CancelJobOutput) { op := &request.Operation{ Name: opCancelJob, @@ -43,7 +64,28 @@ func (c *ElasticTranscoder) CancelJob(input *CancelJobInput) (*CancelJobOutput, const opCreateJob = "CreateJob" -// CreateJobRequest generates a request for the CreateJob operation. +// CreateJobRequest generates a "aws/request.Request" representing the +// client's request for the CreateJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateJobRequest method. +// req, resp := client.CreateJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) CreateJobRequest(input *CreateJobInput) (req *request.Request, output *CreateJobResponse) { op := &request.Operation{ Name: opCreateJob, @@ -76,7 +118,28 @@ func (c *ElasticTranscoder) CreateJob(input *CreateJobInput) (*CreateJobResponse const opCreatePipeline = "CreatePipeline" -// CreatePipelineRequest generates a request for the CreatePipeline operation. +// CreatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the CreatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePipelineRequest method. +// req, resp := client.CreatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) CreatePipelineRequest(input *CreatePipelineInput) (req *request.Request, output *CreatePipelineOutput) { op := &request.Operation{ Name: opCreatePipeline, @@ -103,7 +166,28 @@ func (c *ElasticTranscoder) CreatePipeline(input *CreatePipelineInput) (*CreateP const opCreatePreset = "CreatePreset" -// CreatePresetRequest generates a request for the CreatePreset operation. +// CreatePresetRequest generates a "aws/request.Request" representing the +// client's request for the CreatePreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePresetRequest method. 
+// req, resp := client.CreatePresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) CreatePresetRequest(input *CreatePresetInput) (req *request.Request, output *CreatePresetOutput) { op := &request.Operation{ Name: opCreatePreset, @@ -144,7 +228,28 @@ func (c *ElasticTranscoder) CreatePreset(input *CreatePresetInput) (*CreatePrese const opDeletePipeline = "DeletePipeline" -// DeletePipelineRequest generates a request for the DeletePipeline operation. +// DeletePipelineRequest generates a "aws/request.Request" representing the +// client's request for the DeletePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePipelineRequest method. +// req, resp := client.DeletePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) DeletePipelineRequest(input *DeletePipelineInput) (req *request.Request, output *DeletePipelineOutput) { op := &request.Operation{ Name: opDeletePipeline, @@ -175,7 +280,28 @@ func (c *ElasticTranscoder) DeletePipeline(input *DeletePipelineInput) (*DeleteP const opDeletePreset = "DeletePreset" -// DeletePresetRequest generates a request for the DeletePreset operation. +// DeletePresetRequest generates a "aws/request.Request" representing the +// client's request for the DeletePreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePresetRequest method. +// req, resp := client.DeletePresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) DeletePresetRequest(input *DeletePresetInput) (req *request.Request, output *DeletePresetOutput) { op := &request.Operation{ Name: opDeletePreset, @@ -204,7 +330,28 @@ func (c *ElasticTranscoder) DeletePreset(input *DeletePresetInput) (*DeletePrese const opListJobsByPipeline = "ListJobsByPipeline" -// ListJobsByPipelineRequest generates a request for the ListJobsByPipeline operation. +// ListJobsByPipelineRequest generates a "aws/request.Request" representing the +// client's request for the ListJobsByPipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobsByPipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsByPipelineRequest method. +// req, resp := client.ListJobsByPipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ListJobsByPipelineRequest(input *ListJobsByPipelineInput) (req *request.Request, output *ListJobsByPipelineOutput) { op := &request.Operation{ Name: opListJobsByPipeline, @@ -239,6 +386,23 @@ func (c *ElasticTranscoder) ListJobsByPipeline(input *ListJobsByPipelineInput) ( return out, err } +// ListJobsByPipelinePages iterates over the pages of a ListJobsByPipeline operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobsByPipeline method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobsByPipeline operation. +// pageNum := 0 +// err := client.ListJobsByPipelinePages(params, +// func(page *ListJobsByPipelineOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElasticTranscoder) ListJobsByPipelinePages(input *ListJobsByPipelineInput, fn func(p *ListJobsByPipelineOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListJobsByPipelineRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -249,7 +413,28 @@ func (c *ElasticTranscoder) ListJobsByPipelinePages(input *ListJobsByPipelineInp const opListJobsByStatus = "ListJobsByStatus" -// ListJobsByStatusRequest generates a request for the ListJobsByStatus operation. +// ListJobsByStatusRequest generates a "aws/request.Request" representing the +// client's request for the ListJobsByStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobsByStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsByStatusRequest method. 
+// req, resp := client.ListJobsByStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ListJobsByStatusRequest(input *ListJobsByStatusInput) (req *request.Request, output *ListJobsByStatusOutput) { op := &request.Operation{ Name: opListJobsByStatus, @@ -282,6 +467,23 @@ func (c *ElasticTranscoder) ListJobsByStatus(input *ListJobsByStatusInput) (*Lis return out, err } +// ListJobsByStatusPages iterates over the pages of a ListJobsByStatus operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobsByStatus method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobsByStatus operation. +// pageNum := 0 +// err := client.ListJobsByStatusPages(params, +// func(page *ListJobsByStatusOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElasticTranscoder) ListJobsByStatusPages(input *ListJobsByStatusInput, fn func(p *ListJobsByStatusOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListJobsByStatusRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -292,7 +494,28 @@ func (c *ElasticTranscoder) ListJobsByStatusPages(input *ListJobsByStatusInput, const opListPipelines = "ListPipelines" -// ListPipelinesRequest generates a request for the ListPipelines operation. +// ListPipelinesRequest generates a "aws/request.Request" representing the +// client's request for the ListPipelines operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPipelines method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPipelinesRequest method. +// req, resp := client.ListPipelinesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ListPipelinesRequest(input *ListPipelinesInput) (req *request.Request, output *ListPipelinesOutput) { op := &request.Operation{ Name: opListPipelines, @@ -324,6 +547,23 @@ func (c *ElasticTranscoder) ListPipelines(input *ListPipelinesInput) (*ListPipel return out, err } +// ListPipelinesPages iterates over the pages of a ListPipelines operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPipelines method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPipelines operation. 
+// pageNum := 0 +// err := client.ListPipelinesPages(params, +// func(page *ListPipelinesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElasticTranscoder) ListPipelinesPages(input *ListPipelinesInput, fn func(p *ListPipelinesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPipelinesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -334,7 +574,28 @@ func (c *ElasticTranscoder) ListPipelinesPages(input *ListPipelinesInput, fn fun const opListPresets = "ListPresets" -// ListPresetsRequest generates a request for the ListPresets operation. +// ListPresetsRequest generates a "aws/request.Request" representing the +// client's request for the ListPresets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPresets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPresetsRequest method. +// req, resp := client.ListPresetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ListPresetsRequest(input *ListPresetsInput) (req *request.Request, output *ListPresetsOutput) { op := &request.Operation{ Name: opListPresets, @@ -366,6 +627,23 @@ func (c *ElasticTranscoder) ListPresets(input *ListPresetsInput) (*ListPresetsOu return out, err } +// ListPresetsPages iterates over the pages of a ListPresets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPresets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPresets operation. +// pageNum := 0 +// err := client.ListPresetsPages(params, +// func(page *ListPresetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ElasticTranscoder) ListPresetsPages(input *ListPresetsInput, fn func(p *ListPresetsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPresetsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -376,7 +654,28 @@ func (c *ElasticTranscoder) ListPresetsPages(input *ListPresetsInput, fn func(p const opReadJob = "ReadJob" -// ReadJobRequest generates a request for the ReadJob operation. +// ReadJobRequest generates a "aws/request.Request" representing the +// client's request for the ReadJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ReadJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadJobRequest method. +// req, resp := client.ReadJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ReadJobRequest(input *ReadJobInput) (req *request.Request, output *ReadJobOutput) { op := &request.Operation{ Name: opReadJob, @@ -403,7 +702,28 @@ func (c *ElasticTranscoder) ReadJob(input *ReadJobInput) (*ReadJobOutput, error) const opReadPipeline = "ReadPipeline" -// ReadPipelineRequest generates a request for the ReadPipeline operation. +// ReadPipelineRequest generates a "aws/request.Request" representing the +// client's request for the ReadPipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReadPipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadPipelineRequest method. +// req, resp := client.ReadPipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ReadPipelineRequest(input *ReadPipelineInput) (req *request.Request, output *ReadPipelineOutput) { op := &request.Operation{ Name: opReadPipeline, @@ -430,7 +750,28 @@ func (c *ElasticTranscoder) ReadPipeline(input *ReadPipelineInput) (*ReadPipelin const opReadPreset = "ReadPreset" -// ReadPresetRequest generates a request for the ReadPreset operation. +// ReadPresetRequest generates a "aws/request.Request" representing the +// client's request for the ReadPreset operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReadPreset method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReadPresetRequest method. +// req, resp := client.ReadPresetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) ReadPresetRequest(input *ReadPresetInput) (req *request.Request, output *ReadPresetOutput) { op := &request.Operation{ Name: opReadPreset, @@ -457,7 +798,28 @@ func (c *ElasticTranscoder) ReadPreset(input *ReadPresetInput) (*ReadPresetOutpu const opTestRole = "TestRole" -// TestRoleRequest generates a request for the TestRole operation. +// TestRoleRequest generates a "aws/request.Request" representing the +// client's request for the TestRole operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TestRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TestRoleRequest method. +// req, resp := client.TestRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) TestRoleRequest(input *TestRoleInput) (req *request.Request, output *TestRoleOutput) { op := &request.Operation{ Name: opTestRole, @@ -490,7 +852,28 @@ func (c *ElasticTranscoder) TestRole(input *TestRoleInput) (*TestRoleOutput, err const opUpdatePipeline = "UpdatePipeline" -// UpdatePipelineRequest generates a request for the UpdatePipeline operation. +// UpdatePipelineRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipeline operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipeline method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineRequest method. +// req, resp := client.UpdatePipelineRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) UpdatePipelineRequest(input *UpdatePipelineInput) (req *request.Request, output *UpdatePipelineOutput) { op := &request.Operation{ Name: opUpdatePipeline, @@ -521,7 +904,28 @@ func (c *ElasticTranscoder) UpdatePipeline(input *UpdatePipelineInput) (*UpdateP const opUpdatePipelineNotifications = "UpdatePipelineNotifications" -// UpdatePipelineNotificationsRequest generates a request for the UpdatePipelineNotifications operation. +// UpdatePipelineNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipelineNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipelineNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineNotificationsRequest method. 
+// req, resp := client.UpdatePipelineNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) UpdatePipelineNotificationsRequest(input *UpdatePipelineNotificationsInput) (req *request.Request, output *UpdatePipelineNotificationsOutput) { op := &request.Operation{ Name: opUpdatePipelineNotifications, @@ -552,7 +956,28 @@ func (c *ElasticTranscoder) UpdatePipelineNotifications(input *UpdatePipelineNot const opUpdatePipelineStatus = "UpdatePipelineStatus" -// UpdatePipelineStatusRequest generates a request for the UpdatePipelineStatus operation. +// UpdatePipelineStatusRequest generates a "aws/request.Request" representing the +// client's request for the UpdatePipelineStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdatePipelineStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdatePipelineStatusRequest method. +// req, resp := client.UpdatePipelineStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ElasticTranscoder) UpdatePipelineStatusRequest(input *UpdatePipelineStatusInput) (req *request.Request, output *UpdatePipelineStatusOutput) { op := &request.Operation{ Name: opUpdatePipelineStatus, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go index 322bdcdc5..c23818a23 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // The AWS Elastic Transcoder Service. @@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index ed4293f27..46c912729 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -13,7 +13,28 @@ import ( const opAddTags = "AddTags" -// AddTagsRequest generates a request for the AddTags operation. +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { op := &request.Operation{ Name: opAddTags, @@ -47,7 +68,28 @@ func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { const opApplySecurityGroupsToLoadBalancer = "ApplySecurityGroupsToLoadBalancer" -// ApplySecurityGroupsToLoadBalancerRequest generates a request for the ApplySecurityGroupsToLoadBalancer operation. +// ApplySecurityGroupsToLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the ApplySecurityGroupsToLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplySecurityGroupsToLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplySecurityGroupsToLoadBalancerRequest method. +// req, resp := client.ApplySecurityGroupsToLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroupsToLoadBalancerInput) (req *request.Request, output *ApplySecurityGroupsToLoadBalancerOutput) { op := &request.Operation{ Name: opApplySecurityGroupsToLoadBalancer, @@ -79,7 +121,28 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoad const opAttachLoadBalancerToSubnets = "AttachLoadBalancerToSubnets" -// AttachLoadBalancerToSubnetsRequest generates a request for the AttachLoadBalancerToSubnets operation. +// AttachLoadBalancerToSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the AttachLoadBalancerToSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachLoadBalancerToSubnets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachLoadBalancerToSubnetsRequest method. 
+// req, resp := client.AttachLoadBalancerToSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubnetsInput) (req *request.Request, output *AttachLoadBalancerToSubnetsOutput) { op := &request.Operation{ Name: opAttachLoadBalancerToSubnets, @@ -112,7 +175,28 @@ func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInpu const opConfigureHealthCheck = "ConfigureHealthCheck" -// ConfigureHealthCheckRequest generates a request for the ConfigureHealthCheck operation. +// ConfigureHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the ConfigureHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfigureHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfigureHealthCheckRequest method. +// req, resp := client.ConfigureHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req *request.Request, output *ConfigureHealthCheckOutput) { op := &request.Operation{ Name: opConfigureHealthCheck, @@ -143,7 +227,28 @@ func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*Configure const opCreateAppCookieStickinessPolicy = "CreateAppCookieStickinessPolicy" -// CreateAppCookieStickinessPolicyRequest generates a request for the CreateAppCookieStickinessPolicy operation. +// CreateAppCookieStickinessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateAppCookieStickinessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAppCookieStickinessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAppCookieStickinessPolicyRequest method. 
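These generated comments repeatedly mention injecting custom logic into the request lifecycle with a custom handler, but only show a bare Send. A hedged sketch of what such an injection can look like for ELB's ConfigureHealthCheck, with placeholder load balancer and health-check values:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
        svc := elb.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

        // Load balancer name and target are placeholders.
        params := &elb.ConfigureHealthCheckInput{
            LoadBalancerName: aws.String("my-load-balancer"),
            HealthCheck: &elb.HealthCheck{
                Target:             aws.String("HTTP:80/ping"),
                Interval:           aws.Int64(30),
                Timeout:            aws.Int64(5),
                HealthyThreshold:   aws.Int64(2),
                UnhealthyThreshold: aws.Int64(2),
            },
        }

        req, resp := svc.ConfigureHealthCheckRequest(params)

        // Custom handlers on the Send phase: log the outgoing HTTP request
        // before it is sent and the status code after the response arrives.
        req.Handlers.Send.PushFront(func(r *request.Request) {
            log.Printf("sending %s %s", r.HTTPRequest.Method, r.HTTPRequest.URL)
        })
        req.Handlers.Send.PushBack(func(r *request.Request) {
            if r.HTTPResponse != nil {
                log.Printf("received HTTP %d", r.HTTPResponse.StatusCode)
            }
        })

        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
        fmt.Println(resp.HealthCheck)
    }

PushFront runs before the SDK's core send handler and PushBack after it, which is why the response is only available in the second callback.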
+// req, resp := client.CreateAppCookieStickinessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStickinessPolicyInput) (req *request.Request, output *CreateAppCookieStickinessPolicyOutput) { op := &request.Operation{ Name: opCreateAppCookieStickinessPolicy, @@ -184,7 +289,28 @@ func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPo const opCreateLBCookieStickinessPolicy = "CreateLBCookieStickinessPolicy" -// CreateLBCookieStickinessPolicyRequest generates a request for the CreateLBCookieStickinessPolicy operation. +// CreateLBCookieStickinessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateLBCookieStickinessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLBCookieStickinessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLBCookieStickinessPolicyRequest method. +// req, resp := client.CreateLBCookieStickinessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickinessPolicyInput) (req *request.Request, output *CreateLBCookieStickinessPolicyOutput) { op := &request.Operation{ Name: opCreateLBCookieStickinessPolicy, @@ -227,7 +353,28 @@ func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPoli const opCreateLoadBalancer = "CreateLoadBalancer" -// CreateLoadBalancerRequest generates a request for the CreateLoadBalancer operation. +// CreateLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoadBalancerRequest method. 
+// req, resp := client.CreateLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *request.Request, output *CreateLoadBalancerOutput) { op := &request.Operation{ Name: opCreateLoadBalancer, @@ -265,7 +412,28 @@ func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBal const opCreateLoadBalancerListeners = "CreateLoadBalancerListeners" -// CreateLoadBalancerListenersRequest generates a request for the CreateLoadBalancerListeners operation. +// CreateLoadBalancerListenersRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancerListeners operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancerListeners method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoadBalancerListenersRequest method. +// req, resp := client.CreateLoadBalancerListenersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListenersInput) (req *request.Request, output *CreateLoadBalancerListenersOutput) { op := &request.Operation{ Name: opCreateLoadBalancerListeners, @@ -298,7 +466,28 @@ func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInpu const opCreateLoadBalancerPolicy = "CreateLoadBalancerPolicy" -// CreateLoadBalancerPolicyRequest generates a request for the CreateLoadBalancerPolicy operation. +// CreateLoadBalancerPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoadBalancerPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoadBalancerPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoadBalancerPolicyRequest method. 
+// req, resp := client.CreateLoadBalancerPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInput) (req *request.Request, output *CreateLoadBalancerPolicyOutput) { op := &request.Operation{ Name: opCreateLoadBalancerPolicy, @@ -329,7 +518,28 @@ func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*C const opDeleteLoadBalancer = "DeleteLoadBalancer" -// DeleteLoadBalancerRequest generates a request for the DeleteLoadBalancer operation. +// DeleteLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoadBalancerRequest method. +// req, resp := client.DeleteLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *request.Request, output *DeleteLoadBalancerOutput) { op := &request.Operation{ Name: opDeleteLoadBalancer, @@ -365,7 +575,28 @@ func (c *ELB) DeleteLoadBalancer(input *DeleteLoadBalancerInput) (*DeleteLoadBal const opDeleteLoadBalancerListeners = "DeleteLoadBalancerListeners" -// DeleteLoadBalancerListenersRequest generates a request for the DeleteLoadBalancerListeners operation. +// DeleteLoadBalancerListenersRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoadBalancerListeners operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoadBalancerListeners method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoadBalancerListenersRequest method. +// req, resp := client.DeleteLoadBalancerListenersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DeleteLoadBalancerListenersRequest(input *DeleteLoadBalancerListenersInput) (req *request.Request, output *DeleteLoadBalancerListenersOutput) { op := &request.Operation{ Name: opDeleteLoadBalancerListeners, @@ -392,7 +623,28 @@ func (c *ELB) DeleteLoadBalancerListeners(input *DeleteLoadBalancerListenersInpu const opDeleteLoadBalancerPolicy = "DeleteLoadBalancerPolicy" -// DeleteLoadBalancerPolicyRequest generates a request for the DeleteLoadBalancerPolicy operation. 
+// DeleteLoadBalancerPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoadBalancerPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoadBalancerPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoadBalancerPolicyRequest method. +// req, resp := client.DeleteLoadBalancerPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DeleteLoadBalancerPolicyRequest(input *DeleteLoadBalancerPolicyInput) (req *request.Request, output *DeleteLoadBalancerPolicyOutput) { op := &request.Operation{ Name: opDeleteLoadBalancerPolicy, @@ -420,7 +672,28 @@ func (c *ELB) DeleteLoadBalancerPolicy(input *DeleteLoadBalancerPolicyInput) (*D const opDeregisterInstancesFromLoadBalancer = "DeregisterInstancesFromLoadBalancer" -// DeregisterInstancesFromLoadBalancerRequest generates a request for the DeregisterInstancesFromLoadBalancer operation. +// DeregisterInstancesFromLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstancesFromLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterInstancesFromLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterInstancesFromLoadBalancerRequest method. +// req, resp := client.DeregisterInstancesFromLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstancesFromLoadBalancerInput) (req *request.Request, output *DeregisterInstancesFromLoadBalancerOutput) { op := &request.Operation{ Name: opDeregisterInstancesFromLoadBalancer, @@ -455,7 +728,28 @@ func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFrom const opDescribeInstanceHealth = "DescribeInstanceHealth" -// DescribeInstanceHealthRequest generates a request for the DescribeInstanceHealth operation. +// DescribeInstanceHealthRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstanceHealth operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstanceHealth method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstanceHealthRequest method. +// req, resp := client.DescribeInstanceHealthRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeInstanceHealthRequest(input *DescribeInstanceHealthInput) (req *request.Request, output *DescribeInstanceHealthOutput) { op := &request.Operation{ Name: opDescribeInstanceHealth, @@ -487,7 +781,28 @@ func (c *ELB) DescribeInstanceHealth(input *DescribeInstanceHealthInput) (*Descr const opDescribeLoadBalancerAttributes = "DescribeLoadBalancerAttributes" -// DescribeLoadBalancerAttributesRequest generates a request for the DescribeLoadBalancerAttributes operation. +// DescribeLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerAttributesRequest method. +// req, resp := client.DescribeLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeLoadBalancerAttributesRequest(input *DescribeLoadBalancerAttributesInput) (req *request.Request, output *DescribeLoadBalancerAttributesOutput) { op := &request.Operation{ Name: opDescribeLoadBalancerAttributes, @@ -514,7 +829,28 @@ func (c *ELB) DescribeLoadBalancerAttributes(input *DescribeLoadBalancerAttribut const opDescribeLoadBalancerPolicies = "DescribeLoadBalancerPolicies" -// DescribeLoadBalancerPoliciesRequest generates a request for the DescribeLoadBalancerPolicies operation. +// DescribeLoadBalancerPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeLoadBalancerPoliciesRequest method. +// req, resp := client.DescribeLoadBalancerPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeLoadBalancerPoliciesRequest(input *DescribeLoadBalancerPoliciesInput) (req *request.Request, output *DescribeLoadBalancerPoliciesOutput) { op := &request.Operation{ Name: opDescribeLoadBalancerPolicies, @@ -548,7 +884,28 @@ func (c *ELB) DescribeLoadBalancerPolicies(input *DescribeLoadBalancerPoliciesIn const opDescribeLoadBalancerPolicyTypes = "DescribeLoadBalancerPolicyTypes" -// DescribeLoadBalancerPolicyTypesRequest generates a request for the DescribeLoadBalancerPolicyTypes operation. +// DescribeLoadBalancerPolicyTypesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerPolicyTypes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerPolicyTypes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerPolicyTypesRequest method. +// req, resp := client.DescribeLoadBalancerPolicyTypesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancerPolicyTypesInput) (req *request.Request, output *DescribeLoadBalancerPolicyTypesOutput) { op := &request.Operation{ Name: opDescribeLoadBalancerPolicyTypes, @@ -578,7 +935,28 @@ func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyT const opDescribeLoadBalancers = "DescribeLoadBalancers" -// DescribeLoadBalancersRequest generates a request for the DescribeLoadBalancers operation. +// DescribeLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancersRequest method. 
+// req, resp := client.DescribeLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeLoadBalancersRequest(input *DescribeLoadBalancersInput) (req *request.Request, output *DescribeLoadBalancersOutput) { op := &request.Operation{ Name: opDescribeLoadBalancers, @@ -610,6 +988,23 @@ func (c *ELB) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*Describ return out, err } +// DescribeLoadBalancersPages iterates over the pages of a DescribeLoadBalancers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeLoadBalancers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeLoadBalancers operation. +// pageNum := 0 +// err := client.DescribeLoadBalancersPages(params, +// func(page *DescribeLoadBalancersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn func(p *DescribeLoadBalancersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeLoadBalancersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -620,7 +1015,28 @@ func (c *ELB) DescribeLoadBalancersPages(input *DescribeLoadBalancersInput, fn f const opDescribeTags = "DescribeTags" -// DescribeTagsRequest generates a request for the DescribeTags operation. +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. +// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { op := &request.Operation{ Name: opDescribeTags, @@ -647,7 +1063,28 @@ func (c *ELB) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, error const opDetachLoadBalancerFromSubnets = "DetachLoadBalancerFromSubnets" -// DetachLoadBalancerFromSubnetsRequest generates a request for the DetachLoadBalancerFromSubnets operation. +// DetachLoadBalancerFromSubnetsRequest generates a "aws/request.Request" representing the +// client's request for the DetachLoadBalancerFromSubnets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
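The DescribeLoadBalancersPages helper added above drives the paginated DescribeLoadBalancers call; its signature comes straight from this diff. A self-contained sketch that gathers every load balancer name, assuming a placeholder region:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/elb"
    )

    func main() {
        svc := elb.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

        // Collect every load balancer name across all pages.
        var names []string
        err := svc.DescribeLoadBalancersPages(&elb.DescribeLoadBalancersInput{},
            func(page *elb.DescribeLoadBalancersOutput, lastPage bool) bool {
                for _, lb := range page.LoadBalancerDescriptions {
                    names = append(names, aws.StringValue(lb.LoadBalancerName))
                }
                // Returning true requests the next page; false stops early.
                return true
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(names)
    }

Returning false from the callback is what the generated "at most 3 pages" example relies on to stop pagination early.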
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachLoadBalancerFromSubnets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachLoadBalancerFromSubnetsRequest method. +// req, resp := client.DetachLoadBalancerFromSubnetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DetachLoadBalancerFromSubnetsRequest(input *DetachLoadBalancerFromSubnetsInput) (req *request.Request, output *DetachLoadBalancerFromSubnetsOutput) { op := &request.Operation{ Name: opDetachLoadBalancerFromSubnets, @@ -679,7 +1116,28 @@ func (c *ELB) DetachLoadBalancerFromSubnets(input *DetachLoadBalancerFromSubnets const opDisableAvailabilityZonesForLoadBalancer = "DisableAvailabilityZonesForLoadBalancer" -// DisableAvailabilityZonesForLoadBalancerRequest generates a request for the DisableAvailabilityZonesForLoadBalancer operation. +// DisableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DisableAvailabilityZonesForLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableAvailabilityZonesForLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableAvailabilityZonesForLoadBalancerRequest method. +// req, resp := client.DisableAvailabilityZonesForLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *DisableAvailabilityZonesForLoadBalancerOutput) { op := &request.Operation{ Name: opDisableAvailabilityZonesForLoadBalancer, @@ -717,7 +1175,28 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailability const opEnableAvailabilityZonesForLoadBalancer = "EnableAvailabilityZonesForLoadBalancer" -// EnableAvailabilityZonesForLoadBalancerRequest generates a request for the EnableAvailabilityZonesForLoadBalancer operation. +// EnableAvailabilityZonesForLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the EnableAvailabilityZonesForLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the EnableAvailabilityZonesForLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableAvailabilityZonesForLoadBalancerRequest method. +// req, resp := client.EnableAvailabilityZonesForLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailabilityZonesForLoadBalancerInput) (req *request.Request, output *EnableAvailabilityZonesForLoadBalancerOutput) { op := &request.Operation{ Name: opEnableAvailabilityZonesForLoadBalancer, @@ -751,7 +1230,28 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZo const opModifyLoadBalancerAttributes = "ModifyLoadBalancerAttributes" -// ModifyLoadBalancerAttributesRequest generates a request for the ModifyLoadBalancerAttributes operation. +// ModifyLoadBalancerAttributesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyLoadBalancerAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyLoadBalancerAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyLoadBalancerAttributesRequest method. +// req, resp := client.ModifyLoadBalancerAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttributesInput) (req *request.Request, output *ModifyLoadBalancerAttributesOutput) { op := &request.Operation{ Name: opModifyLoadBalancerAttributes, @@ -791,7 +1291,28 @@ func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesIn const opRegisterInstancesWithLoadBalancer = "RegisterInstancesWithLoadBalancer" -// RegisterInstancesWithLoadBalancerRequest generates a request for the RegisterInstancesWithLoadBalancer operation. +// RegisterInstancesWithLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstancesWithLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterInstancesWithLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterInstancesWithLoadBalancerRequest method. 
+// req, resp := client.RegisterInstancesWithLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesWithLoadBalancerInput) (req *request.Request, output *RegisterInstancesWithLoadBalancerOutput) { op := &request.Operation{ Name: opRegisterInstancesWithLoadBalancer, @@ -845,7 +1366,28 @@ func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoad const opRemoveTags = "RemoveTags" -// RemoveTagsRequest generates a request for the RemoveTags operation. +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { op := &request.Operation{ Name: opRemoveTags, @@ -872,7 +1414,28 @@ func (c *ELB) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { const opSetLoadBalancerListenerSSLCertificate = "SetLoadBalancerListenerSSLCertificate" -// SetLoadBalancerListenerSSLCertificateRequest generates a request for the SetLoadBalancerListenerSSLCertificate operation. +// SetLoadBalancerListenerSSLCertificateRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerListenerSSLCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerListenerSSLCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerListenerSSLCertificateRequest method. 
+// req, resp := client.SetLoadBalancerListenerSSLCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalancerListenerSSLCertificateInput) (req *request.Request, output *SetLoadBalancerListenerSSLCertificateOutput) { op := &request.Operation{ Name: opSetLoadBalancerListenerSSLCertificate, @@ -905,7 +1468,28 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListen const opSetLoadBalancerPoliciesForBackendServer = "SetLoadBalancerPoliciesForBackendServer" -// SetLoadBalancerPoliciesForBackendServerRequest generates a request for the SetLoadBalancerPoliciesForBackendServer operation. +// SetLoadBalancerPoliciesForBackendServerRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerPoliciesForBackendServer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerPoliciesForBackendServer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerPoliciesForBackendServerRequest method. +// req, resp := client.SetLoadBalancerPoliciesForBackendServerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalancerPoliciesForBackendServerInput) (req *request.Request, output *SetLoadBalancerPoliciesForBackendServerOutput) { op := &request.Operation{ Name: opSetLoadBalancerPoliciesForBackendServer, @@ -942,7 +1526,28 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoli const opSetLoadBalancerPoliciesOfListener = "SetLoadBalancerPoliciesOfListener" -// SetLoadBalancerPoliciesOfListenerRequest generates a request for the SetLoadBalancerPoliciesOfListener operation. +// SetLoadBalancerPoliciesOfListenerRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBalancerPoliciesOfListener operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBalancerPoliciesOfListener method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBalancerPoliciesOfListenerRequest method. 
+// req, resp := client.SetLoadBalancerPoliciesOfListenerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPoliciesOfListenerInput) (req *request.Request, output *SetLoadBalancerPoliciesOfListenerOutput) { op := &request.Operation{ Name: opSetLoadBalancerPoliciesOfListener, diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 867254809..3dc5c73fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Elastic Load Balancing distributes incoming traffic across your EC2 instances. @@ -70,7 +70,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index b1a5a7890..bacf653be 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -4,6 +4,7 @@ package emr import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -14,7 +15,28 @@ import ( const opAddInstanceGroups = "AddInstanceGroups" -// AddInstanceGroupsRequest generates a request for the AddInstanceGroups operation. +// AddInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the AddInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddInstanceGroupsRequest method. +// req, resp := client.AddInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) AddInstanceGroupsRequest(input *AddInstanceGroupsInput) (req *request.Request, output *AddInstanceGroupsOutput) { op := &request.Operation{ Name: opAddInstanceGroups, @@ -41,7 +63,28 @@ func (c *EMR) AddInstanceGroups(input *AddInstanceGroupsInput) (*AddInstanceGrou const opAddJobFlowSteps = "AddJobFlowSteps" -// AddJobFlowStepsRequest generates a request for the AddJobFlowSteps operation. +// AddJobFlowStepsRequest generates a "aws/request.Request" representing the +// client's request for the AddJobFlowSteps operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddJobFlowSteps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddJobFlowStepsRequest method. +// req, resp := client.AddJobFlowStepsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) AddJobFlowStepsRequest(input *AddJobFlowStepsInput) (req *request.Request, output *AddJobFlowStepsOutput) { op := &request.Operation{ Name: opAddJobFlowSteps, @@ -91,7 +134,28 @@ func (c *EMR) AddJobFlowSteps(input *AddJobFlowStepsInput) (*AddJobFlowStepsOutp const opAddTags = "AddTags" -// AddTagsRequest generates a request for the AddTags operation. +// AddTagsRequest generates a "aws/request.Request" representing the +// client's request for the AddTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsRequest method. +// req, resp := client.AddTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) AddTagsRequest(input *AddTagsInput) (req *request.Request, output *AddTagsOutput) { op := &request.Operation{ Name: opAddTags, @@ -121,7 +185,28 @@ func (c *EMR) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { const opDescribeCluster = "DescribeCluster" -// DescribeClusterRequest generates a request for the DescribeCluster operation. +// DescribeClusterRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterRequest method. 
+// req, resp := client.DescribeClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) DescribeClusterRequest(input *DescribeClusterInput) (req *request.Request, output *DescribeClusterOutput) { op := &request.Operation{ Name: opDescribeCluster, @@ -149,7 +234,28 @@ func (c *EMR) DescribeCluster(input *DescribeClusterInput) (*DescribeClusterOutp const opDescribeJobFlows = "DescribeJobFlows" -// DescribeJobFlowsRequest generates a request for the DescribeJobFlows operation. +// DescribeJobFlowsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeJobFlows operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeJobFlows method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeJobFlowsRequest method. +// req, resp := client.DescribeJobFlowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) DescribeJobFlowsRequest(input *DescribeJobFlowsInput) (req *request.Request, output *DescribeJobFlowsOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, DescribeJobFlows, has been deprecated") @@ -196,7 +302,28 @@ func (c *EMR) DescribeJobFlows(input *DescribeJobFlowsInput) (*DescribeJobFlowsO const opDescribeStep = "DescribeStep" -// DescribeStepRequest generates a request for the DescribeStep operation. +// DescribeStepRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStep operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStep method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStepRequest method. +// req, resp := client.DescribeStepRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) DescribeStepRequest(input *DescribeStepInput) (req *request.Request, output *DescribeStepOutput) { op := &request.Operation{ Name: opDescribeStep, @@ -223,7 +350,28 @@ func (c *EMR) DescribeStep(input *DescribeStepInput) (*DescribeStepOutput, error const opListBootstrapActions = "ListBootstrapActions" -// ListBootstrapActionsRequest generates a request for the ListBootstrapActions operation. +// ListBootstrapActionsRequest generates a "aws/request.Request" representing the +// client's request for the ListBootstrapActions operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBootstrapActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBootstrapActionsRequest method. +// req, resp := client.ListBootstrapActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ListBootstrapActionsRequest(input *ListBootstrapActionsInput) (req *request.Request, output *ListBootstrapActionsOutput) { op := &request.Operation{ Name: opListBootstrapActions, @@ -254,6 +402,23 @@ func (c *EMR) ListBootstrapActions(input *ListBootstrapActionsInput) (*ListBoots return out, err } +// ListBootstrapActionsPages iterates over the pages of a ListBootstrapActions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListBootstrapActions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListBootstrapActions operation. +// pageNum := 0 +// err := client.ListBootstrapActionsPages(params, +// func(page *ListBootstrapActionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EMR) ListBootstrapActionsPages(input *ListBootstrapActionsInput, fn func(p *ListBootstrapActionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListBootstrapActionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -264,7 +429,28 @@ func (c *EMR) ListBootstrapActionsPages(input *ListBootstrapActionsInput, fn fun const opListClusters = "ListClusters" -// ListClustersRequest generates a request for the ListClusters operation. +// ListClustersRequest generates a "aws/request.Request" representing the +// client's request for the ListClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListClustersRequest method. 
+// req, resp := client.ListClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ListClustersRequest(input *ListClustersInput) (req *request.Request, output *ListClustersOutput) { op := &request.Operation{ Name: opListClusters, @@ -299,6 +485,23 @@ func (c *EMR) ListClusters(input *ListClustersInput) (*ListClustersOutput, error return out, err } +// ListClustersPages iterates over the pages of a ListClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListClusters operation. +// pageNum := 0 +// err := client.ListClustersPages(params, +// func(page *ListClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EMR) ListClustersPages(input *ListClustersInput, fn func(p *ListClustersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListClustersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -309,7 +512,28 @@ func (c *EMR) ListClustersPages(input *ListClustersInput, fn func(p *ListCluster const opListInstanceGroups = "ListInstanceGroups" -// ListInstanceGroupsRequest generates a request for the ListInstanceGroups operation. +// ListInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceGroupsRequest method. +// req, resp := client.ListInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ListInstanceGroupsRequest(input *ListInstanceGroupsInput) (req *request.Request, output *ListInstanceGroupsOutput) { op := &request.Operation{ Name: opListInstanceGroups, @@ -340,6 +564,23 @@ func (c *EMR) ListInstanceGroups(input *ListInstanceGroupsInput) (*ListInstanceG return out, err } +// ListInstanceGroupsPages iterates over the pages of a ListInstanceGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceGroups operation. 
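The ListClustersPages helper documented just above follows the same paginator shape as the ELB and other EMR List*Pages helpers in this diff. A sketch that prints running and waiting clusters, assuming a placeholder region:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/emr"
    )

    func main() {
        svc := emr.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

        // Restrict the listing to active clusters; state names follow the EMR API.
        params := &emr.ListClustersInput{
            ClusterStates: []*string{aws.String("RUNNING"), aws.String("WAITING")},
        }

        err := svc.ListClustersPages(params,
            func(page *emr.ListClustersOutput, lastPage bool) bool {
                for _, c := range page.Clusters {
                    fmt.Printf("%s\t%s\n", aws.StringValue(c.Id), aws.StringValue(c.Name))
                }
                return true // keep paging until lastPage
            })
        if err != nil {
            log.Fatal(err)
        }
    }
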
+// pageNum := 0 +// err := client.ListInstanceGroupsPages(params, +// func(page *ListInstanceGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EMR) ListInstanceGroupsPages(input *ListInstanceGroupsInput, fn func(p *ListInstanceGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInstanceGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -350,7 +591,28 @@ func (c *EMR) ListInstanceGroupsPages(input *ListInstanceGroupsInput, fn func(p const opListInstances = "ListInstances" -// ListInstancesRequest generates a request for the ListInstances operation. +// ListInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstancesRequest method. +// req, resp := client.ListInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ListInstancesRequest(input *ListInstancesInput) (req *request.Request, output *ListInstancesOutput) { op := &request.Operation{ Name: opListInstances, @@ -385,6 +647,23 @@ func (c *EMR) ListInstances(input *ListInstancesInput) (*ListInstancesOutput, er return out, err } +// ListInstancesPages iterates over the pages of a ListInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstances operation. +// pageNum := 0 +// err := client.ListInstancesPages(params, +// func(page *ListInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EMR) ListInstancesPages(input *ListInstancesInput, fn func(p *ListInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -395,7 +674,28 @@ func (c *EMR) ListInstancesPages(input *ListInstancesInput, fn func(p *ListInsta const opListSteps = "ListSteps" -// ListStepsRequest generates a request for the ListSteps operation. +// ListStepsRequest generates a "aws/request.Request" representing the +// client's request for the ListSteps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
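The generated paginators above all follow the same shape, so a single end-to-end sketch covers them. Below is a minimal, self-contained example of driving ListClustersPages; the cluster-state filter and the three-page cap are illustrative choices, not part of the generated API.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.New())

	params := &emr.ListClustersInput{
		// Illustrative filter; any ClusterStates values accepted by the API work here.
		ClusterStates: []*string{aws.String("RUNNING"), aws.String("WAITING")},
	}

	pageNum := 0
	err := svc.ListClustersPages(params,
		func(page *emr.ListClustersOutput, lastPage bool) bool {
			pageNum++
			for _, c := range page.Clusters {
				fmt.Println(aws.StringValue(c.Id), aws.StringValue(c.Name))
			}
			// Returning false stops the iteration; cap at three pages as in the doc comment.
			return pageNum < 3
		})
	if err != nil {
		log.Fatal(err)
	}
}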
If +// you just want the service response, call the ListSteps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStepsRequest method. +// req, resp := client.ListStepsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ListStepsRequest(input *ListStepsInput) (req *request.Request, output *ListStepsOutput) { op := &request.Operation{ Name: opListSteps, @@ -426,6 +726,23 @@ func (c *EMR) ListSteps(input *ListStepsInput) (*ListStepsOutput, error) { return out, err } +// ListStepsPages iterates over the pages of a ListSteps operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSteps method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSteps operation. +// pageNum := 0 +// err := client.ListStepsPages(params, +// func(page *ListStepsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *EMR) ListStepsPages(input *ListStepsInput, fn func(p *ListStepsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStepsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -436,7 +753,28 @@ func (c *EMR) ListStepsPages(input *ListStepsInput, fn func(p *ListStepsOutput, const opModifyInstanceGroups = "ModifyInstanceGroups" -// ModifyInstanceGroupsRequest generates a request for the ModifyInstanceGroups operation. +// ModifyInstanceGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ModifyInstanceGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyInstanceGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyInstanceGroupsRequest method. +// req, resp := client.ModifyInstanceGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) ModifyInstanceGroupsRequest(input *ModifyInstanceGroupsInput) (req *request.Request, output *ModifyInstanceGroupsOutput) { op := &request.Operation{ Name: opModifyInstanceGroups, @@ -468,7 +806,28 @@ func (c *EMR) ModifyInstanceGroups(input *ModifyInstanceGroupsInput) (*ModifyIns const opRemoveTags = "RemoveTags" -// RemoveTagsRequest generates a request for the RemoveTags operation. +// RemoveTagsRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
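Where these doc comments describe injecting custom logic into the request lifecycle, the intended usage looks roughly like the sketch below. The logging handler, resource ID, and tag key are made up for illustration; only the Request/Send pattern itself comes from the generated code.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.New())

	// Build the request object without sending it yet.
	req, resp := svc.RemoveTagsRequest(&emr.RemoveTagsInput{
		ResourceId: aws.String("j-EXAMPLE"),               // placeholder cluster ID
		TagKeys:    []*string{aws.String("environment")},  // placeholder tag key
	})

	// Custom logic attached before Send; here, just log the operation name.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s to %s", r.Operation.Name, r.ClientInfo.ServiceName)
	})

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // resp is now filled
}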
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsRequest method. +// req, resp := client.RemoveTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) RemoveTagsRequest(input *RemoveTagsInput) (req *request.Request, output *RemoveTagsOutput) { op := &request.Operation{ Name: opRemoveTags, @@ -500,7 +859,28 @@ func (c *EMR) RemoveTags(input *RemoveTagsInput) (*RemoveTagsOutput, error) { const opRunJobFlow = "RunJobFlow" -// RunJobFlowRequest generates a request for the RunJobFlow operation. +// RunJobFlowRequest generates a "aws/request.Request" representing the +// client's request for the RunJobFlow operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RunJobFlow method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RunJobFlowRequest method. +// req, resp := client.RunJobFlowRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) RunJobFlowRequest(input *RunJobFlowInput) (req *request.Request, output *RunJobFlowOutput) { op := &request.Operation{ Name: opRunJobFlow, @@ -549,7 +929,28 @@ func (c *EMR) RunJobFlow(input *RunJobFlowInput) (*RunJobFlowOutput, error) { const opSetTerminationProtection = "SetTerminationProtection" -// SetTerminationProtectionRequest generates a request for the SetTerminationProtection operation. +// SetTerminationProtectionRequest generates a "aws/request.Request" representing the +// client's request for the SetTerminationProtection operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTerminationProtection method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTerminationProtectionRequest method. 
+// req, resp := client.SetTerminationProtectionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInput) (req *request.Request, output *SetTerminationProtectionOutput) { op := &request.Operation{ Name: opSetTerminationProtection, @@ -594,7 +995,28 @@ func (c *EMR) SetTerminationProtection(input *SetTerminationProtectionInput) (*S const opSetVisibleToAllUsers = "SetVisibleToAllUsers" -// SetVisibleToAllUsersRequest generates a request for the SetVisibleToAllUsers operation. +// SetVisibleToAllUsersRequest generates a "aws/request.Request" representing the +// client's request for the SetVisibleToAllUsers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVisibleToAllUsers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVisibleToAllUsersRequest method. +// req, resp := client.SetVisibleToAllUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) SetVisibleToAllUsersRequest(input *SetVisibleToAllUsersInput) (req *request.Request, output *SetVisibleToAllUsersOutput) { op := &request.Operation{ Name: opSetVisibleToAllUsers, @@ -628,7 +1050,28 @@ func (c *EMR) SetVisibleToAllUsers(input *SetVisibleToAllUsersInput) (*SetVisibl const opTerminateJobFlows = "TerminateJobFlows" -// TerminateJobFlowsRequest generates a request for the TerminateJobFlows operation. +// TerminateJobFlowsRequest generates a "aws/request.Request" representing the +// client's request for the TerminateJobFlows operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TerminateJobFlows method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TerminateJobFlowsRequest method. +// req, resp := client.TerminateJobFlowsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *request.Request, output *TerminateJobFlowsOutput) { op := &request.Operation{ Name: opTerminateJobFlows, @@ -684,6 +1127,32 @@ func (s AddInstanceGroupsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddInstanceGroupsInput"} + if s.InstanceGroups == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceGroups")) + } + if s.JobFlowId == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowId")) + } + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Output from an AddInstanceGroups call. type AddInstanceGroupsOutput struct { _ struct{} `type:"structure"` @@ -727,6 +1196,32 @@ func (s AddJobFlowStepsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddJobFlowStepsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddJobFlowStepsInput"} + if s.JobFlowId == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowId")) + } + if s.Steps == nil { + invalidParams.Add(request.NewErrParamRequired("Steps")) + } + if s.Steps != nil { + for i, v := range s.Steps { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Steps", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The output for the AddJobFlowSteps operation. type AddJobFlowStepsOutput struct { _ struct{} `type:"structure"` @@ -770,6 +1265,22 @@ func (s AddTagsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output indicates the result of adding tags to a resource. type AddTagsOutput struct { _ struct{} `type:"structure"` @@ -847,6 +1358,27 @@ func (s BootstrapActionConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BootstrapActionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BootstrapActionConfig"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.ScriptBootstrapAction == nil { + invalidParams.Add(request.NewErrParamRequired("ScriptBootstrapAction")) + } + if s.ScriptBootstrapAction != nil { + if err := s.ScriptBootstrapAction.Validate(); err != nil { + invalidParams.AddNested("ScriptBootstrapAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Reports the configuration of a bootstrap action in a job flow. type BootstrapActionDetail struct { _ struct{} `type:"structure"` @@ -1123,6 +1655,19 @@ func (s DescribeClusterInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
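The Validate methods added throughout this hunk are what the SDK's parameter-validation handler runs before a request is sent; they can also be called directly. A small sketch with a deliberately incomplete input (the cluster ID is a placeholder):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	// Tags is required but omitted, so Validate reports an ErrInvalidParams error.
	params := &emr.AddTagsInput{
		ResourceId: aws.String("j-EXAMPLE"), // placeholder cluster ID
	}

	if err := params.Validate(); err != nil {
		fmt.Println(err)
	}
}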
+func (s *DescribeClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeClusterInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output contains the description of the cluster. type DescribeClusterOutput struct { _ struct{} `type:"structure"` @@ -1207,6 +1752,22 @@ func (s DescribeStepInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeStepInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeStepInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + if s.StepId == nil { + invalidParams.Add(request.NewErrParamRequired("StepId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output contains the description of the cluster step. type DescribeStepOutput struct { _ struct{} `type:"structure"` @@ -1272,6 +1833,24 @@ func (s EbsBlockDeviceConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *EbsBlockDeviceConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EbsBlockDeviceConfig"} + if s.VolumeSpecification == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeSpecification")) + } + if s.VolumeSpecification != nil { + if err := s.VolumeSpecification.Validate(); err != nil { + invalidParams.AddNested("VolumeSpecification", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type EbsConfiguration struct { _ struct{} `type:"structure"` @@ -1290,6 +1869,26 @@ func (s EbsConfiguration) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *EbsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EbsConfiguration"} + if s.EbsBlockDeviceConfigs != nil { + for i, v := range s.EbsBlockDeviceConfigs { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "EbsBlockDeviceConfigs", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // EBS block device that's attached to an EC2 instance. type EbsVolume struct { _ struct{} `type:"structure"` @@ -1396,6 +1995,19 @@ func (s HadoopJarStepConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *HadoopJarStepConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HadoopJarStepConfig"} + if s.Jar == nil { + invalidParams.Add(request.NewErrParamRequired("Jar")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A cluster step consisting of a JAR file whose main function will be executed. // The main function submits a job for Hadoop to execute and waits for the job // to finish or fail. @@ -1516,6 +2128,9 @@ type InstanceGroup struct { // The number of instances currently running in this instance group. RunningInstanceCount *int64 `type:"integer"` + // Policy for customizing shrink operations. + ShrinkPolicy *ShrinkPolicy `type:"structure"` + // The current status of the instance group. 
Status *InstanceGroupStatus `type:"structure"` } @@ -1575,6 +2190,33 @@ func (s InstanceGroupConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceGroupConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceGroupConfig"} + if s.InstanceCount == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceCount")) + } + if s.InstanceRole == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceRole")) + } + if s.InstanceType == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceType")) + } + if s.InstanceType != nil && len(*s.InstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("InstanceType", 1)) + } + if s.EbsConfiguration != nil { + if err := s.EbsConfiguration.Validate(); err != nil { + invalidParams.AddNested("EbsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Detailed information about an instance group. type InstanceGroupDetail struct { _ struct{} `type:"structure"` @@ -1638,9 +2280,8 @@ func (s InstanceGroupDetail) GoString() string { type InstanceGroupModifyConfig struct { _ struct{} `type:"structure"` - // The EC2 InstanceIds to terminate. For advanced users only. Once you terminate - // the instances, the instance group will not return to its original requested - // size. + // The EC2 InstanceIds to terminate. Once you terminate the instances, the instance + // group will not return to its original requested size. EC2InstanceIdsToTerminate []*string `type:"list"` // Target size for the instance group. @@ -1648,6 +2289,9 @@ type InstanceGroupModifyConfig struct { // Unique ID of the instance group to expand or shrink. InstanceGroupId *string `type:"string" required:"true"` + + // Policy for customizing shrink operations. + ShrinkPolicy *ShrinkPolicy `type:"structure"` } // String returns the string representation @@ -1660,6 +2304,19 @@ func (s InstanceGroupModifyConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *InstanceGroupModifyConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InstanceGroupModifyConfig"} + if s.InstanceGroupId == nil { + invalidParams.Add(request.NewErrParamRequired("InstanceGroupId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The status change reason details for the instance group. type InstanceGroupStateChangeReason struct { _ struct{} `type:"structure"` @@ -1729,6 +2386,32 @@ func (s InstanceGroupTimeline) GoString() string { return s.String() } +// Custom policy for requesting termination protection or termination of specific +// instances when shrinking an instance group. +type InstanceResizePolicy struct { + _ struct{} `type:"structure"` + + // Decommissioning timeout override for the specific list of instances to be + // terminated. + InstanceTerminationTimeout *int64 `type:"integer"` + + // Specific list of instances to be protected when shrinking an instance group. + InstancesToProtect []*string `type:"list"` + + // Specific list of instances to be terminated when shrinking an instance group. 
+ InstancesToTerminate []*string `type:"list"` +} + +// String returns the string representation +func (s InstanceResizePolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InstanceResizePolicy) GoString() string { + return s.String() +} + // The details of the status change reason for the instance. type InstanceStateChangeReason struct { _ struct{} `type:"structure"` @@ -1974,6 +2657,37 @@ func (s JobFlowInstancesConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *JobFlowInstancesConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "JobFlowInstancesConfig"} + if s.MasterInstanceType != nil && len(*s.MasterInstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("MasterInstanceType", 1)) + } + if s.SlaveInstanceType != nil && len(*s.SlaveInstanceType) < 1 { + invalidParams.Add(request.NewErrParamMinLen("SlaveInstanceType", 1)) + } + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Placement != nil { + if err := s.Placement.Validate(); err != nil { + invalidParams.AddNested("Placement", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Specify the type of Amazon EC2 instances to run the job flow on. type JobFlowInstancesDetail struct { _ struct{} `type:"structure"` @@ -2081,6 +2795,19 @@ func (s ListBootstrapActionsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBootstrapActionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBootstrapActionsInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output contains the boostrap actions detail . type ListBootstrapActionsOutput struct { _ struct{} `type:"structure"` @@ -2173,6 +2900,19 @@ func (s ListInstanceGroupsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInstanceGroupsInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This input determines which instance groups to retrieve. type ListInstanceGroupsOutput struct { _ struct{} `type:"structure"` @@ -2207,6 +2947,10 @@ type ListInstancesInput struct { // The type of instance group for which to list the instances. InstanceGroupTypes []*string `type:"list"` + // A list of instance states that will filter the instances returned with this + // request. + InstanceStates []*string `type:"list"` + // The pagination token that indicates the next set of results to retrieve. Marker *string `type:"string"` } @@ -2221,6 +2965,19 @@ func (s ListInstancesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListInstancesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListInstancesInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output contains the list of instances. type ListInstancesOutput struct { _ struct{} `type:"structure"` @@ -2269,7 +3026,21 @@ func (s ListStepsInput) GoString() string { return s.String() } -// This output contains the list of steps. +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListStepsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListStepsInput"} + if s.ClusterId == nil { + invalidParams.Add(request.NewErrParamRequired("ClusterId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// This output contains the list of steps returned in reverse order. This means +// that the last step is the first element in the list. type ListStepsOutput struct { _ struct{} `type:"structure"` @@ -2308,6 +3079,26 @@ func (s ModifyInstanceGroupsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyInstanceGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyInstanceGroupsInput"} + if s.InstanceGroups != nil { + for i, v := range s.InstanceGroups { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "InstanceGroups", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type ModifyInstanceGroupsOutput struct { _ struct{} `type:"structure"` } @@ -2340,6 +3131,19 @@ func (s PlacementType) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *PlacementType) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PlacementType"} + if s.AvailabilityZone == nil { + invalidParams.Add(request.NewErrParamRequired("AvailabilityZone")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This input identifies a cluster and a list of tags to remove. type RemoveTagsInput struct { _ struct{} `type:"structure"` @@ -2362,6 +3166,22 @@ func (s RemoveTagsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveTagsInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // This output indicates the result of removing tags from a resource. type RemoveTagsOutput struct { _ struct{} `type:"structure"` @@ -2498,6 +3318,47 @@ func (s RunJobFlowInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RunJobFlowInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RunJobFlowInput"} + if s.Instances == nil { + invalidParams.Add(request.NewErrParamRequired("Instances")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.BootstrapActions != nil { + for i, v := range s.BootstrapActions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BootstrapActions", i), err.(request.ErrInvalidParams)) + } + } + } + if s.Instances != nil { + if err := s.Instances.Validate(); err != nil { + invalidParams.AddNested("Instances", err.(request.ErrInvalidParams)) + } + } + if s.Steps != nil { + for i, v := range s.Steps { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Steps", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The result of the RunJobFlow operation. type RunJobFlowOutput struct { _ struct{} `type:"structure"` @@ -2538,6 +3399,19 @@ func (s ScriptBootstrapActionConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ScriptBootstrapActionConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ScriptBootstrapActionConfig"} + if s.Path == nil { + invalidParams.Add(request.NewErrParamRequired("Path")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // The input argument to the TerminationProtection operation. type SetTerminationProtectionInput struct { _ struct{} `type:"structure"` @@ -2563,6 +3437,22 @@ func (s SetTerminationProtectionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetTerminationProtectionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetTerminationProtectionInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + if s.TerminationProtected == nil { + invalidParams.Add(request.NewErrParamRequired("TerminationProtected")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type SetTerminationProtectionOutput struct { _ struct{} `type:"structure"` } @@ -2602,6 +3492,22 @@ func (s SetVisibleToAllUsersInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetVisibleToAllUsersInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetVisibleToAllUsersInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + if s.VisibleToAllUsers == nil { + invalidParams.Add(request.NewErrParamRequired("VisibleToAllUsers")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type SetVisibleToAllUsersOutput struct { _ struct{} `type:"structure"` } @@ -2616,6 +3522,30 @@ func (s SetVisibleToAllUsersOutput) GoString() string { return s.String() } +// Policy for customizing shrink operations. Allows configuration of decommissioning +// timeout and targeted instance shrinking. +type ShrinkPolicy struct { + _ struct{} `type:"structure"` + + // The desired timeout for decommissioning an instance. Overrides the default + // YARN decommissioning timeout. 
+ DecommissionTimeout *int64 `type:"integer"` + + // Custom policy for requesting termination protection or termination of specific + // instances when shrinking an instance group. + InstanceResizePolicy *InstanceResizePolicy `type:"structure"` +} + +// String returns the string representation +func (s ShrinkPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ShrinkPolicy) GoString() string { + return s.String() +} + // This represents a step in a cluster. type Step struct { _ struct{} `type:"structure"` @@ -2671,6 +3601,27 @@ func (s StepConfig) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepConfig) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepConfig"} + if s.HadoopJarStep == nil { + invalidParams.Add(request.NewErrParamRequired("HadoopJarStep")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.HadoopJarStep != nil { + if err := s.HadoopJarStep.Validate(); err != nil { + invalidParams.AddNested("HadoopJarStep", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Combines the execution state and configuration of a step. type StepDetail struct { _ struct{} `type:"structure"` @@ -2891,6 +3842,19 @@ func (s TerminateJobFlowsInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *TerminateJobFlowsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TerminateJobFlowsInput"} + if s.JobFlowIds == nil { + invalidParams.Add(request.NewErrParamRequired("JobFlowIds")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type TerminateJobFlowsOutput struct { _ struct{} `type:"structure"` } @@ -2931,6 +3895,22 @@ func (s VolumeSpecification) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *VolumeSpecification) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VolumeSpecification"} + if s.SizeInGB == nil { + invalidParams.Add(request.NewErrParamRequired("SizeInGB")) + } + if s.VolumeType == nil { + invalidParams.Add(request.NewErrParamRequired("VolumeType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + const ( // @enum ActionOnFailure ActionOnFailureTerminateJobFlow = "TERMINATE_JOB_FLOW" diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go b/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go deleted file mode 100644 index b1b578ae7..000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/examples_test.go +++ /dev/null @@ -1,617 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
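The ShrinkPolicy and InstanceResizePolicy types introduced in the api.go hunks above hang off InstanceGroupModifyConfig. A hedged sketch of a resize request that uses them follows; the instance group ID, instance ID, and timeouts are placeholders.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/emr"
)

func main() {
	svc := emr.New(session.New())

	_, err := svc.ModifyInstanceGroups(&emr.ModifyInstanceGroupsInput{
		InstanceGroups: []*emr.InstanceGroupModifyConfig{
			{
				InstanceGroupId: aws.String("ig-EXAMPLE"), // placeholder instance group ID
				InstanceCount:   aws.Int64(2),             // target size after the shrink
				ShrinkPolicy: &emr.ShrinkPolicy{
					// Override the default YARN decommissioning timeout.
					DecommissionTimeout: aws.Int64(300),
					InstanceResizePolicy: &emr.InstanceResizePolicy{
						// Keep this instance out of the shrink operation (placeholder ID).
						InstancesToProtect:         []*string{aws.String("i-0abc123")},
						InstanceTerminationTimeout: aws.Int64(120),
					},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}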
- -package emr_test - -import ( - "bytes" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/emr" -) - -var _ time.Duration -var _ bytes.Buffer - -func ExampleEMR_AddInstanceGroups() { - svc := emr.New(session.New()) - - params := &emr.AddInstanceGroupsInput{ - InstanceGroups: []*emr.InstanceGroupConfig{ // Required - { // Required - InstanceCount: aws.Int64(1), // Required - InstanceRole: aws.String("InstanceRoleType"), // Required - InstanceType: aws.String("InstanceType"), // Required - BidPrice: aws.String("XmlStringMaxLen256"), - Configurations: []*emr.Configuration{ - { // Required - Classification: aws.String("String"), - Configurations: []*emr.Configuration{ - // Recursive values... - }, - Properties: map[string]*string{ - "Key": aws.String("String"), // Required - // More values... - }, - }, - // More values... - }, - EbsConfiguration: &emr.EbsConfiguration{ - EbsBlockDeviceConfigs: []*emr.EbsBlockDeviceConfig{ - { // Required - VolumeSpecification: &emr.VolumeSpecification{ // Required - SizeInGB: aws.Int64(1), // Required - VolumeType: aws.String("String"), // Required - Iops: aws.Int64(1), - }, - VolumesPerInstance: aws.Int64(1), - }, - // More values... - }, - EbsOptimized: aws.Bool(true), - }, - Market: aws.String("MarketType"), - Name: aws.String("XmlStringMaxLen256"), - }, - // More values... - }, - JobFlowId: aws.String("XmlStringMaxLen256"), // Required - } - resp, err := svc.AddInstanceGroups(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_AddJobFlowSteps() { - svc := emr.New(session.New()) - - params := &emr.AddJobFlowStepsInput{ - JobFlowId: aws.String("XmlStringMaxLen256"), // Required - Steps: []*emr.StepConfig{ // Required - { // Required - HadoopJarStep: &emr.HadoopJarStepConfig{ // Required - Jar: aws.String("XmlString"), // Required - Args: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - MainClass: aws.String("XmlString"), - Properties: []*emr.KeyValue{ - { // Required - Key: aws.String("XmlString"), - Value: aws.String("XmlString"), - }, - // More values... - }, - }, - Name: aws.String("XmlStringMaxLen256"), // Required - ActionOnFailure: aws.String("ActionOnFailure"), - }, - // More values... - }, - } - resp, err := svc.AddJobFlowSteps(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_AddTags() { - svc := emr.New(session.New()) - - params := &emr.AddTagsInput{ - ResourceId: aws.String("ResourceId"), // Required - Tags: []*emr.Tag{ // Required - { // Required - Key: aws.String("String"), - Value: aws.String("String"), - }, - // More values... - }, - } - resp, err := svc.AddTags(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. 
- fmt.Println(resp) -} - -func ExampleEMR_DescribeCluster() { - svc := emr.New(session.New()) - - params := &emr.DescribeClusterInput{ - ClusterId: aws.String("ClusterId"), // Required - } - resp, err := svc.DescribeCluster(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_DescribeJobFlows() { - svc := emr.New(session.New()) - - params := &emr.DescribeJobFlowsInput{ - CreatedAfter: aws.Time(time.Now()), - CreatedBefore: aws.Time(time.Now()), - JobFlowIds: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - JobFlowStates: []*string{ - aws.String("JobFlowExecutionState"), // Required - // More values... - }, - } - resp, err := svc.DescribeJobFlows(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_DescribeStep() { - svc := emr.New(session.New()) - - params := &emr.DescribeStepInput{ - ClusterId: aws.String("ClusterId"), // Required - StepId: aws.String("StepId"), // Required - } - resp, err := svc.DescribeStep(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ListBootstrapActions() { - svc := emr.New(session.New()) - - params := &emr.ListBootstrapActionsInput{ - ClusterId: aws.String("ClusterId"), // Required - Marker: aws.String("Marker"), - } - resp, err := svc.ListBootstrapActions(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ListClusters() { - svc := emr.New(session.New()) - - params := &emr.ListClustersInput{ - ClusterStates: []*string{ - aws.String("ClusterState"), // Required - // More values... - }, - CreatedAfter: aws.Time(time.Now()), - CreatedBefore: aws.Time(time.Now()), - Marker: aws.String("Marker"), - } - resp, err := svc.ListClusters(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ListInstanceGroups() { - svc := emr.New(session.New()) - - params := &emr.ListInstanceGroupsInput{ - ClusterId: aws.String("ClusterId"), // Required - Marker: aws.String("Marker"), - } - resp, err := svc.ListInstanceGroups(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ListInstances() { - svc := emr.New(session.New()) - - params := &emr.ListInstancesInput{ - ClusterId: aws.String("ClusterId"), // Required - InstanceGroupId: aws.String("InstanceGroupId"), - InstanceGroupTypes: []*string{ - aws.String("InstanceGroupType"), // Required - // More values... 
- }, - Marker: aws.String("Marker"), - } - resp, err := svc.ListInstances(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ListSteps() { - svc := emr.New(session.New()) - - params := &emr.ListStepsInput{ - ClusterId: aws.String("ClusterId"), // Required - Marker: aws.String("Marker"), - StepIds: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - StepStates: []*string{ - aws.String("StepState"), // Required - // More values... - }, - } - resp, err := svc.ListSteps(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_ModifyInstanceGroups() { - svc := emr.New(session.New()) - - params := &emr.ModifyInstanceGroupsInput{ - InstanceGroups: []*emr.InstanceGroupModifyConfig{ - { // Required - InstanceGroupId: aws.String("XmlStringMaxLen256"), // Required - EC2InstanceIdsToTerminate: []*string{ - aws.String("InstanceId"), // Required - // More values... - }, - InstanceCount: aws.Int64(1), - }, - // More values... - }, - } - resp, err := svc.ModifyInstanceGroups(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_RemoveTags() { - svc := emr.New(session.New()) - - params := &emr.RemoveTagsInput{ - ResourceId: aws.String("ResourceId"), // Required - TagKeys: []*string{ // Required - aws.String("String"), // Required - // More values... - }, - } - resp, err := svc.RemoveTags(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_RunJobFlow() { - svc := emr.New(session.New()) - - params := &emr.RunJobFlowInput{ - Instances: &emr.JobFlowInstancesConfig{ // Required - AdditionalMasterSecurityGroups: []*string{ - aws.String("XmlStringMaxLen256"), // Required - // More values... - }, - AdditionalSlaveSecurityGroups: []*string{ - aws.String("XmlStringMaxLen256"), // Required - // More values... - }, - Ec2KeyName: aws.String("XmlStringMaxLen256"), - Ec2SubnetId: aws.String("XmlStringMaxLen256"), - EmrManagedMasterSecurityGroup: aws.String("XmlStringMaxLen256"), - EmrManagedSlaveSecurityGroup: aws.String("XmlStringMaxLen256"), - HadoopVersion: aws.String("XmlStringMaxLen256"), - InstanceCount: aws.Int64(1), - InstanceGroups: []*emr.InstanceGroupConfig{ - { // Required - InstanceCount: aws.Int64(1), // Required - InstanceRole: aws.String("InstanceRoleType"), // Required - InstanceType: aws.String("InstanceType"), // Required - BidPrice: aws.String("XmlStringMaxLen256"), - Configurations: []*emr.Configuration{ - { // Required - Classification: aws.String("String"), - Configurations: []*emr.Configuration{ - // Recursive values... - }, - Properties: map[string]*string{ - "Key": aws.String("String"), // Required - // More values... - }, - }, - // More values... 
- }, - EbsConfiguration: &emr.EbsConfiguration{ - EbsBlockDeviceConfigs: []*emr.EbsBlockDeviceConfig{ - { // Required - VolumeSpecification: &emr.VolumeSpecification{ // Required - SizeInGB: aws.Int64(1), // Required - VolumeType: aws.String("String"), // Required - Iops: aws.Int64(1), - }, - VolumesPerInstance: aws.Int64(1), - }, - // More values... - }, - EbsOptimized: aws.Bool(true), - }, - Market: aws.String("MarketType"), - Name: aws.String("XmlStringMaxLen256"), - }, - // More values... - }, - KeepJobFlowAliveWhenNoSteps: aws.Bool(true), - MasterInstanceType: aws.String("InstanceType"), - Placement: &emr.PlacementType{ - AvailabilityZone: aws.String("XmlString"), // Required - }, - ServiceAccessSecurityGroup: aws.String("XmlStringMaxLen256"), - SlaveInstanceType: aws.String("InstanceType"), - TerminationProtected: aws.Bool(true), - }, - Name: aws.String("XmlStringMaxLen256"), // Required - AdditionalInfo: aws.String("XmlString"), - AmiVersion: aws.String("XmlStringMaxLen256"), - Applications: []*emr.Application{ - { // Required - AdditionalInfo: map[string]*string{ - "Key": aws.String("String"), // Required - // More values... - }, - Args: []*string{ - aws.String("String"), // Required - // More values... - }, - Name: aws.String("String"), - Version: aws.String("String"), - }, - // More values... - }, - BootstrapActions: []*emr.BootstrapActionConfig{ - { // Required - Name: aws.String("XmlStringMaxLen256"), // Required - ScriptBootstrapAction: &emr.ScriptBootstrapActionConfig{ // Required - Path: aws.String("XmlString"), // Required - Args: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - }, - }, - // More values... - }, - Configurations: []*emr.Configuration{ - { // Required - Classification: aws.String("String"), - Configurations: []*emr.Configuration{ - // Recursive values... - }, - Properties: map[string]*string{ - "Key": aws.String("String"), // Required - // More values... - }, - }, - // More values... - }, - JobFlowRole: aws.String("XmlString"), - LogUri: aws.String("XmlString"), - NewSupportedProducts: []*emr.SupportedProductConfig{ - { // Required - Args: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - Name: aws.String("XmlStringMaxLen256"), - }, - // More values... - }, - ReleaseLabel: aws.String("XmlStringMaxLen256"), - ServiceRole: aws.String("XmlString"), - Steps: []*emr.StepConfig{ - { // Required - HadoopJarStep: &emr.HadoopJarStepConfig{ // Required - Jar: aws.String("XmlString"), // Required - Args: []*string{ - aws.String("XmlString"), // Required - // More values... - }, - MainClass: aws.String("XmlString"), - Properties: []*emr.KeyValue{ - { // Required - Key: aws.String("XmlString"), - Value: aws.String("XmlString"), - }, - // More values... - }, - }, - Name: aws.String("XmlStringMaxLen256"), // Required - ActionOnFailure: aws.String("ActionOnFailure"), - }, - // More values... - }, - SupportedProducts: []*string{ - aws.String("XmlStringMaxLen256"), // Required - // More values... - }, - Tags: []*emr.Tag{ - { // Required - Key: aws.String("String"), - Value: aws.String("String"), - }, - // More values... - }, - VisibleToAllUsers: aws.Bool(true), - } - resp, err := svc.RunJobFlow(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. 
- fmt.Println(resp) -} - -func ExampleEMR_SetTerminationProtection() { - svc := emr.New(session.New()) - - params := &emr.SetTerminationProtectionInput{ - JobFlowIds: []*string{ // Required - aws.String("XmlString"), // Required - // More values... - }, - TerminationProtected: aws.Bool(true), // Required - } - resp, err := svc.SetTerminationProtection(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_SetVisibleToAllUsers() { - svc := emr.New(session.New()) - - params := &emr.SetVisibleToAllUsersInput{ - JobFlowIds: []*string{ // Required - aws.String("XmlString"), // Required - // More values... - }, - VisibleToAllUsers: aws.Bool(true), // Required - } - resp, err := svc.SetVisibleToAllUsers(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} - -func ExampleEMR_TerminateJobFlows() { - svc := emr.New(session.New()) - - params := &emr.TerminateJobFlowsInput{ - JobFlowIds: []*string{ // Required - aws.String("XmlString"), // Required - // More values... - }, - } - resp, err := svc.TerminateJobFlows(params) - - if err != nil { - // Print the error, cast err to awserr.Error to get the Code and - // Message from an error. - fmt.Println(err.Error()) - return - } - - // Pretty-print the response data. - fmt.Println(resp) -} diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go index 7074979e2..755e1699a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Elastic MapReduce (Amazon EMR) is a web service that makes it easy @@ -64,7 +64,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go index ddead2447..e9eb2fdee 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/api.go @@ -13,7 +13,28 @@ import ( const opCreateDeliveryStream = "CreateDeliveryStream" -// CreateDeliveryStreamRequest generates a request for the CreateDeliveryStream operation. +// CreateDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeliveryStreamRequest method. +// req, resp := client.CreateDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) (req *request.Request, output *CreateDeliveryStreamOutput) { op := &request.Operation{ Name: opCreateDeliveryStream, @@ -33,7 +54,7 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // Creates a delivery stream. // -// CreateDeliveryStream is an asynchronous operation that immediately returns. +// CreateDeliveryStream is an asynchronous operation that immediately returns. // The initial status of the delivery stream is CREATING. After the delivery // stream is created, its status is ACTIVE and it now accepts data. Attempts // to send data to a delivery stream that is not in the ACTIVE state cause an @@ -47,9 +68,9 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // By default, you can create up to 20 delivery streams per region. // // A delivery stream can only be configured with a single destination, Amazon -// S3 or Amazon Redshift. For correct CreateDeliveryStream request syntax, specify -// only one destination configuration parameter: either ElasticsearchDestinationConfiguration, -// RedshiftDestinationConfiguration or S3DestinationConfiguration +// S3, Amazon Elasticsearch Service, or Amazon Redshift. For correct CreateDeliveryStream +// request syntax, specify only one destination configuration parameter: either +// S3DestinationConfiguration, ElasticsearchDestinationConfiguration, or RedshiftDestinationConfiguration. // // As part of S3DestinationConfiguration, optional values BufferingHints, EncryptionConfiguration, // and CompressionFormat can be provided. By default, if no BufferingHints value @@ -63,19 +84,23 @@ func (c *Firehose) CreateDeliveryStreamRequest(input *CreateDeliveryStreamInput) // // A few notes about RedshiftDestinationConfiguration: // -// An Amazon Redshift destination requires an S3 bucket as intermediate location, +// An Amazon Redshift destination requires an S3 bucket as intermediate location, // as Firehose first delivers data to S3 and then uses COPY syntax to load data // into an Amazon Redshift table. This is specified in the RedshiftDestinationConfiguration.S3Configuration -// parameter element. The compression formats SNAPPY or ZIP cannot be specified -// in RedshiftDestinationConfiguration.S3Configuration because the Amazon Redshift -// COPY operation that reads from the S3 bucket doesn't support these compression -// formats. We strongly recommend that the username and password provided is -// used exclusively for Firehose purposes, and that the permissions for the -// account are restricted for Amazon Redshift INSERT permissions. Firehose -// assumes the IAM role that is configured as part of destinations. 
The IAM -// role should allow the Firehose principal to assume the role, and the role -// should have permissions that allows the service to deliver the data. For -// more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) +// parameter element. +// +// The compression formats SNAPPY or ZIP cannot be specified in RedshiftDestinationConfiguration.S3Configuration +// because the Amazon Redshift COPY operation that reads from the S3 bucket +// doesn't support these compression formats. +// +// We strongly recommend that the username and password provided is used +// exclusively for Firehose purposes, and that the permissions for the account +// are restricted for Amazon Redshift INSERT permissions. +// +// Firehose assumes the IAM role that is configured as part of destinations. +// The IAM role should allow the Firehose principal to assume the role, and +// the role should have permissions that allows the service to deliver the data. +// For more information, see Amazon S3 Bucket Access (http://docs.aws.amazon.com/firehose/latest/dev/controlling-access.html#using-iam-s3) // in the Amazon Kinesis Firehose Developer Guide. func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*CreateDeliveryStreamOutput, error) { req, out := c.CreateDeliveryStreamRequest(input) @@ -85,7 +110,28 @@ func (c *Firehose) CreateDeliveryStream(input *CreateDeliveryStreamInput) (*Crea const opDeleteDeliveryStream = "DeleteDeliveryStream" -// DeleteDeliveryStreamRequest generates a request for the DeleteDeliveryStream operation. +// DeleteDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDeliveryStreamRequest method. +// req, resp := client.DeleteDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) DeleteDeliveryStreamRequest(input *DeleteDeliveryStreamInput) (req *request.Request, output *DeleteDeliveryStreamOutput) { op := &request.Operation{ Name: opDeleteDeliveryStream, @@ -123,7 +169,28 @@ func (c *Firehose) DeleteDeliveryStream(input *DeleteDeliveryStreamInput) (*Dele const opDescribeDeliveryStream = "DescribeDeliveryStream" -// DescribeDeliveryStreamRequest generates a request for the DescribeDeliveryStream operation. +// DescribeDeliveryStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeliveryStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDeliveryStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDeliveryStreamRequest method. +// req, resp := client.DescribeDeliveryStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) DescribeDeliveryStreamRequest(input *DescribeDeliveryStreamInput) (req *request.Request, output *DescribeDeliveryStreamOutput) { op := &request.Operation{ Name: opDescribeDeliveryStream, @@ -153,7 +220,28 @@ func (c *Firehose) DescribeDeliveryStream(input *DescribeDeliveryStreamInput) (* const opListDeliveryStreams = "ListDeliveryStreams" -// ListDeliveryStreamsRequest generates a request for the ListDeliveryStreams operation. +// ListDeliveryStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListDeliveryStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeliveryStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeliveryStreamsRequest method. +// req, resp := client.ListDeliveryStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) ListDeliveryStreamsRequest(input *ListDeliveryStreamsInput) (req *request.Request, output *ListDeliveryStreamsOutput) { op := &request.Operation{ Name: opListDeliveryStreams, @@ -188,7 +276,28 @@ func (c *Firehose) ListDeliveryStreams(input *ListDeliveryStreamsInput) (*ListDe const opPutRecord = "PutRecord" -// PutRecordRequest generates a request for the PutRecord operation. +// PutRecordRequest generates a "aws/request.Request" representing the +// client's request for the PutRecord operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecord method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordRequest method. 
+// req, resp := client.PutRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { op := &request.Operation{ Name: opPutRecord, @@ -247,7 +356,28 @@ func (c *Firehose) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { const opPutRecordBatch = "PutRecordBatch" -// PutRecordBatchRequest generates a request for the PutRecordBatch operation. +// PutRecordBatchRequest generates a "aws/request.Request" representing the +// client's request for the PutRecordBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecordBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordBatchRequest method. +// req, resp := client.PutRecordBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) PutRecordBatchRequest(input *PutRecordBatchInput) (req *request.Request, output *PutRecordBatchOutput) { op := &request.Operation{ Name: opPutRecordBatch, @@ -329,7 +459,28 @@ func (c *Firehose) PutRecordBatch(input *PutRecordBatchInput) (*PutRecordBatchOu const opUpdateDestination = "UpdateDestination" -// UpdateDestinationRequest generates a request for the UpdateDestination operation. +// UpdateDestinationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateDestination operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateDestination method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateDestinationRequest method. +// req, resp := client.UpdateDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Firehose) UpdateDestinationRequest(input *UpdateDestinationInput) (req *request.Request, output *UpdateDestinationOutput) { op := &request.Operation{ Name: opUpdateDestination, @@ -465,14 +616,14 @@ type CopyCommand struct { // command (http://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html). Some // possible examples that would apply to Firehose are as follows. // - // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and + // delimiter '\t' lzop; - fields are delimited with "\t" (TAB character) and // compressed using lzop. // - // delimiter '| - fields are delimited with "|" (this is the default delimiter). 
+ // delimiter '| - fields are delimited with "|" (this is the default delimiter). // - // delimiter '|' escape - the delimiter should be escaped. + // delimiter '|' escape - the delimiter should be escaped. // - // fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' + // fixedwidth 'venueid:3,venuename:25,venuecity:12,venuestate:2,venueseats:6' // - fields are fixed width in the source, with each width specified after every // column in the table. // @@ -1067,9 +1218,10 @@ type ElasticsearchRetryOptions struct { _ struct{} `type:"structure"` // After an initial failure to deliver to Amazon ES, the total amount of time - // during which Firehose re-attempts delivery. After this time has elapsed, - // the failed documents are written to Amazon S3. Default value is 300 seconds. - // A value of 0 (zero) results in no retries. + // during which Firehose re-attempts delivery (including the first attempt). + // After this time has elapsed, the failed documents are written to Amazon S3. + // Default value is 300 seconds (5 minutes). A value of 0 (zero) results in + // no retries. DurationInSeconds *int64 `type:"integer"` } @@ -1428,6 +1580,10 @@ type RedshiftDestinationConfiguration struct { // The user password. Password *string `min:"6" type:"string" required:"true"` + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + // The ARN of the AWS credentials. RoleARN *string `min:"1" type:"string" required:"true"` @@ -1516,6 +1672,10 @@ type RedshiftDestinationDescription struct { // The COPY command. CopyCommand *CopyCommand `type:"structure" required:"true"` + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + // The ARN of the AWS credentials. RoleARN *string `min:"1" type:"string" required:"true"` @@ -1552,6 +1712,10 @@ type RedshiftDestinationUpdate struct { // The user password. Password *string `min:"6" type:"string"` + // Configures retry behavior in the event that Firehose is unable to deliver + // documents to Amazon Redshift. Default value is 3600 (60 minutes). + RetryOptions *RedshiftRetryOptions `type:"structure"` + // The ARN of the AWS credentials. RoleARN *string `min:"1" type:"string"` @@ -1608,6 +1772,29 @@ func (s *RedshiftDestinationUpdate) Validate() error { return nil } +// Configures retry behavior in the event that Firehose is unable to deliver +// documents to Amazon Redshift. +type RedshiftRetryOptions struct { + _ struct{} `type:"structure"` + + // The length of time during which Firehose retries delivery after a failure, + // starting from the initial request and including the first attempt. The default + // value is 3600 seconds (60 minutes). Firehose does not retry if the value + // of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer + // than the current value. + DurationInSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s RedshiftRetryOptions) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RedshiftRetryOptions) GoString() string { + return s.String() +} + // Describes the configuration of a destination in Amazon S3. 
type S3DestinationConfiguration struct { _ struct{} `type:"structure"` @@ -1638,7 +1825,7 @@ type S3DestinationConfiguration struct { // format prefix. Note that if the prefix ends with a slash, it appears as a // folder in the S3 bucket. For more information, see Amazon S3 Object Name // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) - // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). + // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/). Prefix *string `type:"string"` // The ARN of the AWS credentials. @@ -1713,7 +1900,7 @@ type S3DestinationDescription struct { // format prefix. Note that if the prefix ends with a slash, it appears as a // folder in the S3 bucket. For more information, see Amazon S3 Object Name // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) - // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). + // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/). Prefix *string `type:"string"` // The ARN of the AWS credentials. @@ -1760,7 +1947,7 @@ type S3DestinationUpdate struct { // format prefix. Note that if the prefix ends with a slash, it appears as a // folder in the S3 bucket. For more information, see Amazon S3 Object Name // Format (http://docs.aws.amazon.com/firehose/latest/dev/basic-deliver.html) - // in the guide-fh-dev (http://docs.aws.amazon.com/firehose/latest/dev/). + // in the Amazon Kinesis Firehose Developer Guide (http://docs.aws.amazon.com/firehose/latest/dev/). Prefix *string `type:"string"` // The ARN of the AWS credentials. diff --git a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go index fcb419882..7fd7f37ad 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/firehose/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Kinesis Firehose is a fully-managed service that delivers real-time @@ -62,7 +62,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go index dedf76f42..5ef73b115 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/api.go @@ -14,7 +14,28 @@ import ( const opAbortMultipartUpload = "AbortMultipartUpload" -// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -62,7 +83,28 @@ func (c *Glacier) AbortMultipartUpload(input *AbortMultipartUploadInput) (*Abort const opAbortVaultLock = "AbortVaultLock" -// AbortVaultLockRequest generates a request for the AbortVaultLock operation. +// AbortVaultLockRequest generates a "aws/request.Request" representing the +// client's request for the AbortVaultLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortVaultLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortVaultLockRequest method. +// req, resp := client.AbortVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) AbortVaultLockRequest(input *AbortVaultLockInput) (req *request.Request, output *AbortVaultLockOutput) { op := &request.Operation{ Name: opAbortVaultLock, @@ -106,7 +148,28 @@ func (c *Glacier) AbortVaultLock(input *AbortVaultLockInput) (*AbortVaultLockOut const opAddTagsToVault = "AddTagsToVault" -// AddTagsToVaultRequest generates a request for the AddTagsToVault operation. +// AddTagsToVaultRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToVaultRequest method. 
+// req, resp := client.AddTagsToVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) AddTagsToVaultRequest(input *AddTagsToVaultInput) (req *request.Request, output *AddTagsToVaultOutput) { op := &request.Operation{ Name: opAddTagsToVault, @@ -140,7 +203,28 @@ func (c *Glacier) AddTagsToVault(input *AddTagsToVaultInput) (*AddTagsToVaultOut const opCompleteMultipartUpload = "CompleteMultipartUpload" -// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. +// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *ArchiveCreationOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -210,7 +294,28 @@ func (c *Glacier) CompleteMultipartUpload(input *CompleteMultipartUploadInput) ( const opCompleteVaultLock = "CompleteVaultLock" -// CompleteVaultLockRequest generates a request for the CompleteVaultLock operation. +// CompleteVaultLockRequest generates a "aws/request.Request" representing the +// client's request for the CompleteVaultLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteVaultLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteVaultLockRequest method. +// req, resp := client.CompleteVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) CompleteVaultLockRequest(input *CompleteVaultLockInput) (req *request.Request, output *CompleteVaultLockOutput) { op := &request.Operation{ Name: opCompleteVaultLock, @@ -253,7 +358,28 @@ func (c *Glacier) CompleteVaultLock(input *CompleteVaultLockInput) (*CompleteVau const opCreateVault = "CreateVault" -// CreateVaultRequest generates a request for the CreateVault operation. +// CreateVaultRequest generates a "aws/request.Request" representing the +// client's request for the CreateVault operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVaultRequest method. +// req, resp := client.CreateVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) CreateVaultRequest(input *CreateVaultInput) (req *request.Request, output *CreateVaultOutput) { op := &request.Operation{ Name: opCreateVault, @@ -303,7 +429,28 @@ func (c *Glacier) CreateVault(input *CreateVaultInput) (*CreateVaultOutput, erro const opDeleteArchive = "DeleteArchive" -// DeleteArchiveRequest generates a request for the DeleteArchive operation. +// DeleteArchiveRequest generates a "aws/request.Request" representing the +// client's request for the DeleteArchive operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteArchive method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteArchiveRequest method. +// req, resp := client.DeleteArchiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DeleteArchiveRequest(input *DeleteArchiveInput) (req *request.Request, output *DeleteArchiveOutput) { op := &request.Operation{ Name: opDeleteArchive, @@ -352,7 +499,28 @@ func (c *Glacier) DeleteArchive(input *DeleteArchiveInput) (*DeleteArchiveOutput const opDeleteVault = "DeleteVault" -// DeleteVaultRequest generates a request for the DeleteVault operation. +// DeleteVaultRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVaultRequest method. 
+// req, resp := client.DeleteVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DeleteVaultRequest(input *DeleteVaultInput) (req *request.Request, output *DeleteVaultOutput) { op := &request.Operation{ Name: opDeleteVault, @@ -403,7 +571,28 @@ func (c *Glacier) DeleteVault(input *DeleteVaultInput) (*DeleteVaultOutput, erro const opDeleteVaultAccessPolicy = "DeleteVaultAccessPolicy" -// DeleteVaultAccessPolicyRequest generates a request for the DeleteVaultAccessPolicy operation. +// DeleteVaultAccessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVaultAccessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVaultAccessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVaultAccessPolicyRequest method. +// req, resp := client.DeleteVaultAccessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DeleteVaultAccessPolicyRequest(input *DeleteVaultAccessPolicyInput) (req *request.Request, output *DeleteVaultAccessPolicyOutput) { op := &request.Operation{ Name: opDeleteVaultAccessPolicy, @@ -441,7 +630,28 @@ func (c *Glacier) DeleteVaultAccessPolicy(input *DeleteVaultAccessPolicyInput) ( const opDeleteVaultNotifications = "DeleteVaultNotifications" -// DeleteVaultNotificationsRequest generates a request for the DeleteVaultNotifications operation. +// DeleteVaultNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVaultNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVaultNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVaultNotificationsRequest method. +// req, resp := client.DeleteVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DeleteVaultNotificationsRequest(input *DeleteVaultNotificationsInput) (req *request.Request, output *DeleteVaultNotificationsOutput) { op := &request.Operation{ Name: opDeleteVaultNotifications, @@ -484,7 +694,28 @@ func (c *Glacier) DeleteVaultNotifications(input *DeleteVaultNotificationsInput) const opDescribeJob = "DescribeJob" -// DescribeJobRequest generates a request for the DescribeJob operation. 
+// DescribeJobRequest generates a "aws/request.Request" representing the +// client's request for the DescribeJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeJobRequest method. +// req, resp := client.DescribeJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DescribeJobRequest(input *DescribeJobInput) (req *request.Request, output *JobDescription) { op := &request.Operation{ Name: opDescribeJob, @@ -532,7 +763,28 @@ func (c *Glacier) DescribeJob(input *DescribeJobInput) (*JobDescription, error) const opDescribeVault = "DescribeVault" -// DescribeVaultRequest generates a request for the DescribeVault operation. +// DescribeVaultRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVaultRequest method. +// req, resp := client.DescribeVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) DescribeVaultRequest(input *DescribeVaultInput) (req *request.Request, output *DescribeVaultOutput) { op := &request.Operation{ Name: opDescribeVault, @@ -578,7 +830,28 @@ func (c *Glacier) DescribeVault(input *DescribeVaultInput) (*DescribeVaultOutput const opGetDataRetrievalPolicy = "GetDataRetrievalPolicy" -// GetDataRetrievalPolicyRequest generates a request for the GetDataRetrievalPolicy operation. +// GetDataRetrievalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetDataRetrievalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetDataRetrievalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetDataRetrievalPolicyRequest method. 
+// req, resp := client.GetDataRetrievalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) GetDataRetrievalPolicyRequest(input *GetDataRetrievalPolicyInput) (req *request.Request, output *GetDataRetrievalPolicyOutput) { op := &request.Operation{ Name: opGetDataRetrievalPolicy, @@ -607,7 +880,28 @@ func (c *Glacier) GetDataRetrievalPolicy(input *GetDataRetrievalPolicyInput) (*G const opGetJobOutput = "GetJobOutput" -// GetJobOutputRequest generates a request for the GetJobOutput operation. +// GetJobOutputRequest generates a "aws/request.Request" representing the +// client's request for the GetJobOutput operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetJobOutput method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetJobOutputRequest method. +// req, resp := client.GetJobOutputRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) GetJobOutputRequest(input *GetJobOutputInput) (req *request.Request, output *GetJobOutputOutput) { op := &request.Operation{ Name: opGetJobOutput, @@ -677,7 +971,28 @@ func (c *Glacier) GetJobOutput(input *GetJobOutputInput) (*GetJobOutputOutput, e const opGetVaultAccessPolicy = "GetVaultAccessPolicy" -// GetVaultAccessPolicyRequest generates a request for the GetVaultAccessPolicy operation. +// GetVaultAccessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetVaultAccessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetVaultAccessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetVaultAccessPolicyRequest method. +// req, resp := client.GetVaultAccessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) GetVaultAccessPolicyRequest(input *GetVaultAccessPolicyInput) (req *request.Request, output *GetVaultAccessPolicyOutput) { op := &request.Operation{ Name: opGetVaultAccessPolicy, @@ -709,7 +1024,28 @@ func (c *Glacier) GetVaultAccessPolicy(input *GetVaultAccessPolicyInput) (*GetVa const opGetVaultLock = "GetVaultLock" -// GetVaultLockRequest generates a request for the GetVaultLock operation. +// GetVaultLockRequest generates a "aws/request.Request" representing the +// client's request for the GetVaultLock operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetVaultLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetVaultLockRequest method. +// req, resp := client.GetVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) GetVaultLockRequest(input *GetVaultLockInput) (req *request.Request, output *GetVaultLockOutput) { op := &request.Operation{ Name: opGetVaultLock, @@ -753,7 +1089,28 @@ func (c *Glacier) GetVaultLock(input *GetVaultLockInput) (*GetVaultLockOutput, e const opGetVaultNotifications = "GetVaultNotifications" -// GetVaultNotificationsRequest generates a request for the GetVaultNotifications operation. +// GetVaultNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the GetVaultNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetVaultNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetVaultNotificationsRequest method. +// req, resp := client.GetVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) GetVaultNotificationsRequest(input *GetVaultNotificationsInput) (req *request.Request, output *GetVaultNotificationsOutput) { op := &request.Operation{ Name: opGetVaultNotifications, @@ -798,7 +1155,28 @@ func (c *Glacier) GetVaultNotifications(input *GetVaultNotificationsInput) (*Get const opInitiateJob = "InitiateJob" -// InitiateJobRequest generates a request for the InitiateJob operation. +// InitiateJobRequest generates a "aws/request.Request" representing the +// client's request for the InitiateJob operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateJob method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateJobRequest method. 
+// req, resp := client.InitiateJobRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) InitiateJobRequest(input *InitiateJobInput) (req *request.Request, output *InitiateJobOutput) { op := &request.Operation{ Name: opInitiateJob, @@ -940,7 +1318,28 @@ func (c *Glacier) InitiateJob(input *InitiateJobInput) (*InitiateJobOutput, erro const opInitiateMultipartUpload = "InitiateMultipartUpload" -// InitiateMultipartUploadRequest generates a request for the InitiateMultipartUpload operation. +// InitiateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the InitiateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateMultipartUploadRequest method. +// req, resp := client.InitiateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) InitiateMultipartUploadRequest(input *InitiateMultipartUploadInput) (req *request.Request, output *InitiateMultipartUploadOutput) { op := &request.Operation{ Name: opInitiateMultipartUpload, @@ -1001,7 +1400,28 @@ func (c *Glacier) InitiateMultipartUpload(input *InitiateMultipartUploadInput) ( const opInitiateVaultLock = "InitiateVaultLock" -// InitiateVaultLockRequest generates a request for the InitiateVaultLock operation. +// InitiateVaultLockRequest generates a "aws/request.Request" representing the +// client's request for the InitiateVaultLock operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InitiateVaultLock method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InitiateVaultLockRequest method. +// req, resp := client.InitiateVaultLockRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) InitiateVaultLockRequest(input *InitiateVaultLockInput) (req *request.Request, output *InitiateVaultLockOutput) { op := &request.Operation{ Name: opInitiateVaultLock, @@ -1055,7 +1475,28 @@ func (c *Glacier) InitiateVaultLock(input *InitiateVaultLockInput) (*InitiateVau const opListJobs = "ListJobs" -// ListJobsRequest generates a request for the ListJobs operation. +// ListJobsRequest generates a "aws/request.Request" representing the +// client's request for the ListJobs operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListJobs method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListJobsRequest method. +// req, resp := client.ListJobsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) ListJobsRequest(input *ListJobsInput) (req *request.Request, output *ListJobsOutput) { op := &request.Operation{ Name: opListJobs, @@ -1125,6 +1566,23 @@ func (c *Glacier) ListJobs(input *ListJobsInput) (*ListJobsOutput, error) { return out, err } +// ListJobsPages iterates over the pages of a ListJobs operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListJobs method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListJobs operation. +// pageNum := 0 +// err := client.ListJobsPages(params, +// func(page *ListJobsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Glacier) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListJobsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1135,7 +1593,28 @@ func (c *Glacier) ListJobsPages(input *ListJobsInput, fn func(p *ListJobsOutput, const opListMultipartUploads = "ListMultipartUploads" -// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -1195,6 +1674,23 @@ func (c *Glacier) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListM return out, err } +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Glacier) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListMultipartUploadsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1205,7 +1701,28 @@ func (c *Glacier) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn const opListParts = "ListParts" -// ListPartsRequest generates a request for the ListParts operation. +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -1259,6 +1776,23 @@ func (c *Glacier) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { return out, err } +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Glacier) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPartsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1269,7 +1803,28 @@ func (c *Glacier) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutp const opListTagsForVault = "ListTagsForVault" -// ListTagsForVaultRequest generates a request for the ListTagsForVault operation. +// ListTagsForVaultRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForVaultRequest method. +// req, resp := client.ListTagsForVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) ListTagsForVaultRequest(input *ListTagsForVaultInput) (req *request.Request, output *ListTagsForVaultOutput) { op := &request.Operation{ Name: opListTagsForVault, @@ -1298,7 +1853,28 @@ func (c *Glacier) ListTagsForVault(input *ListTagsForVaultInput) (*ListTagsForVa const opListVaults = "ListVaults" -// ListVaultsRequest generates a request for the ListVaults operation. +// ListVaultsRequest generates a "aws/request.Request" representing the +// client's request for the ListVaults operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVaults method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVaultsRequest method. +// req, resp := client.ListVaultsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) ListVaultsRequest(input *ListVaultsInput) (req *request.Request, output *ListVaultsOutput) { op := &request.Operation{ Name: opListVaults, @@ -1350,6 +1926,23 @@ func (c *Glacier) ListVaults(input *ListVaultsInput) (*ListVaultsOutput, error) return out, err } +// ListVaultsPages iterates over the pages of a ListVaults operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListVaults method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVaults operation. +// pageNum := 0 +// err := client.ListVaultsPages(params, +// func(page *ListVaultsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Glacier) ListVaultsPages(input *ListVaultsInput, fn func(p *ListVaultsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListVaultsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1360,7 +1953,28 @@ func (c *Glacier) ListVaultsPages(input *ListVaultsInput, fn func(p *ListVaultsO const opRemoveTagsFromVault = "RemoveTagsFromVault" -// RemoveTagsFromVaultRequest generates a request for the RemoveTagsFromVault operation. +// RemoveTagsFromVaultRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromVault operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromVault method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromVaultRequest method. +// req, resp := client.RemoveTagsFromVaultRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) RemoveTagsFromVaultRequest(input *RemoveTagsFromVaultInput) (req *request.Request, output *RemoveTagsFromVaultOutput) { op := &request.Operation{ Name: opRemoveTagsFromVault, @@ -1393,7 +2007,28 @@ func (c *Glacier) RemoveTagsFromVault(input *RemoveTagsFromVaultInput) (*RemoveT const opSetDataRetrievalPolicy = "SetDataRetrievalPolicy" -// SetDataRetrievalPolicyRequest generates a request for the SetDataRetrievalPolicy operation. +// SetDataRetrievalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetDataRetrievalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDataRetrievalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDataRetrievalPolicyRequest method. 
+// req, resp := client.SetDataRetrievalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) SetDataRetrievalPolicyRequest(input *SetDataRetrievalPolicyInput) (req *request.Request, output *SetDataRetrievalPolicyOutput) { op := &request.Operation{ Name: opSetDataRetrievalPolicy, @@ -1428,7 +2063,28 @@ func (c *Glacier) SetDataRetrievalPolicy(input *SetDataRetrievalPolicyInput) (*S const opSetVaultAccessPolicy = "SetVaultAccessPolicy" -// SetVaultAccessPolicyRequest generates a request for the SetVaultAccessPolicy operation. +// SetVaultAccessPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SetVaultAccessPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVaultAccessPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVaultAccessPolicyRequest method. +// req, resp := client.SetVaultAccessPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) SetVaultAccessPolicyRequest(input *SetVaultAccessPolicyInput) (req *request.Request, output *SetVaultAccessPolicyOutput) { op := &request.Operation{ Name: opSetVaultAccessPolicy, @@ -1463,7 +2119,28 @@ func (c *Glacier) SetVaultAccessPolicy(input *SetVaultAccessPolicyInput) (*SetVa const opSetVaultNotifications = "SetVaultNotifications" -// SetVaultNotificationsRequest generates a request for the SetVaultNotifications operation. +// SetVaultNotificationsRequest generates a "aws/request.Request" representing the +// client's request for the SetVaultNotifications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetVaultNotifications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetVaultNotificationsRequest method. +// req, resp := client.SetVaultNotificationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) SetVaultNotificationsRequest(input *SetVaultNotificationsInput) (req *request.Request, output *SetVaultNotificationsOutput) { op := &request.Operation{ Name: opSetVaultNotifications, @@ -1520,7 +2197,28 @@ func (c *Glacier) SetVaultNotifications(input *SetVaultNotificationsInput) (*Set const opUploadArchive = "UploadArchive" -// UploadArchiveRequest generates a request for the UploadArchive operation. 
+// UploadArchiveRequest generates a "aws/request.Request" representing the +// client's request for the UploadArchive operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadArchive method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadArchiveRequest method. +// req, resp := client.UploadArchiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) UploadArchiveRequest(input *UploadArchiveInput) (req *request.Request, output *ArchiveCreationOutput) { op := &request.Operation{ Name: opUploadArchive, @@ -1582,7 +2280,28 @@ func (c *Glacier) UploadArchive(input *UploadArchiveInput) (*ArchiveCreationOutp const opUploadMultipartPart = "UploadMultipartPart" -// UploadMultipartPartRequest generates a request for the UploadMultipartPart operation. +// UploadMultipartPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadMultipartPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadMultipartPart method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadMultipartPartRequest method. +// req, resp := client.UploadMultipartPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Glacier) UploadMultipartPartRequest(input *UploadMultipartPartInput) (req *request.Request, output *UploadMultipartPartOutput) { op := &request.Operation{ Name: opUploadMultipartPart, diff --git a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go index b83c63a61..a318653d0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/glacier/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Glacier is a storage solution for "cold data." 
@@ -88,7 +88,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go index 0a1b93adc..35efd0338 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/api.go @@ -15,7 +15,28 @@ import ( const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" -// AddClientIDToOpenIDConnectProviderRequest generates a request for the AddClientIDToOpenIDConnectProvider operation. +// AddClientIDToOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the AddClientIDToOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddClientIDToOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddClientIDToOpenIDConnectProviderRequest method. +// req, resp := client.AddClientIDToOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) (req *request.Request, output *AddClientIDToOpenIDConnectProviderOutput) { op := &request.Operation{ Name: opAddClientIDToOpenIDConnectProvider, @@ -36,7 +57,7 @@ func (c *IAM) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpen } // Adds a new client ID (also known as audience) to the list of client IDs already -// registered for the specified IAM OpenID Connect provider. +// registered for the specified IAM OpenID Connect (OIDC) provider resource. // // This action is idempotent; it does not fail or return an error if you add // an existing client ID to the provider. @@ -48,7 +69,28 @@ func (c *IAM) AddClientIDToOpenIDConnectProvider(input *AddClientIDToOpenIDConne const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" -// AddRoleToInstanceProfileRequest generates a request for the AddRoleToInstanceProfile operation. +// AddRoleToInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the AddRoleToInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddRoleToInstanceProfile method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddRoleToInstanceProfileRequest method. +// req, resp := client.AddRoleToInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) (req *request.Request, output *AddRoleToInstanceProfileOutput) { op := &request.Operation{ Name: opAddRoleToInstanceProfile, @@ -68,8 +110,12 @@ func (c *IAM) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInp return } -// Adds the specified role to the specified instance profile. For more information -// about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// Adds the specified IAM role to the specified instance profile. +// +// The caller of this API must be granted the PassRole permission on the IAM +// role by a permission policy. +// +// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). // For more information about instance profiles, go to About Instance Profiles // (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*AddRoleToInstanceProfileOutput, error) { @@ -80,7 +126,28 @@ func (c *IAM) AddRoleToInstanceProfile(input *AddRoleToInstanceProfileInput) (*A const opAddUserToGroup = "AddUserToGroup" -// AddUserToGroupRequest generates a request for the AddUserToGroup operation. +// AddUserToGroupRequest generates a "aws/request.Request" representing the +// client's request for the AddUserToGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddUserToGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddUserToGroupRequest method. +// req, resp := client.AddUserToGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AddUserToGroupRequest(input *AddUserToGroupInput) (req *request.Request, output *AddUserToGroupOutput) { op := &request.Operation{ Name: opAddUserToGroup, @@ -109,7 +176,28 @@ func (c *IAM) AddUserToGroup(input *AddUserToGroupInput) (*AddUserToGroupOutput, const opAttachGroupPolicy = "AttachGroupPolicy" -// AttachGroupPolicyRequest generates a request for the AttachGroupPolicy operation. +// AttachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachGroupPolicyRequest method. +// req, resp := client.AttachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *request.Request, output *AttachGroupPolicyOutput) { op := &request.Operation{ Name: opAttachGroupPolicy, @@ -129,13 +217,13 @@ func (c *IAM) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) (req *requ return } -// Attaches the specified managed policy to the specified group. +// Attaches the specified managed policy to the specified IAM group. // // You use this API to attach a managed policy to a group. To embed an inline // policy in a group, use PutGroupPolicy. // -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPolicyOutput, error) { req, out := c.AttachGroupPolicyRequest(input) @@ -145,7 +233,28 @@ func (c *IAM) AttachGroupPolicy(input *AttachGroupPolicyInput) (*AttachGroupPoli const opAttachRolePolicy = "AttachRolePolicy" -// AttachRolePolicyRequest generates a request for the AttachRolePolicy operation. +// AttachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachRolePolicyRequest method. +// req, resp := client.AttachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *request.Request, output *AttachRolePolicyOutput) { op := &request.Operation{ Name: opAttachRolePolicy, @@ -165,17 +274,17 @@ func (c *IAM) AttachRolePolicyRequest(input *AttachRolePolicyInput) (req *reques return } -// Attaches the specified managed policy to the specified role. +// Attaches the specified managed policy to the specified IAM role. // -// When you attach a managed policy to a role, the managed policy is used as -// the role's access (permissions) policy. 
You cannot use a managed policy as -// the role's trust policy. The role's trust policy is created at the same time -// as the role, using CreateRole. You can update a role's trust policy using -// UpdateAssumeRolePolicy. +// When you attach a managed policy to a role, the managed policy becomes part +// of the role's permission (access) policy. You cannot use a managed policy +// as the role's trust policy. The role's trust policy is created at the same +// time as the role, using CreateRole. You can update a role's trust policy +// using UpdateAssumeRolePolicy. // // Use this API to attach a managed policy to a role. To embed an inline policy -// in a role, use PutRolePolicy. For more information about policies, refer -// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in a role, use PutRolePolicy. For more information about policies, see Managed +// Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyOutput, error) { req, out := c.AttachRolePolicyRequest(input) @@ -185,7 +294,28 @@ func (c *IAM) AttachRolePolicy(input *AttachRolePolicyInput) (*AttachRolePolicyO const opAttachUserPolicy = "AttachUserPolicy" -// AttachUserPolicyRequest generates a request for the AttachUserPolicy operation. +// AttachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the AttachUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachUserPolicyRequest method. +// req, resp := client.AttachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *request.Request, output *AttachUserPolicyOutput) { op := &request.Operation{ Name: opAttachUserPolicy, @@ -210,8 +340,8 @@ func (c *IAM) AttachUserPolicyRequest(input *AttachUserPolicyInput) (req *reques // You use this API to attach a managed policy to a user. To embed an inline // policy in a user, use PutUserPolicy. // -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyOutput, error) { req, out := c.AttachUserPolicyRequest(input) @@ -221,7 +351,28 @@ func (c *IAM) AttachUserPolicy(input *AttachUserPolicyInput) (*AttachUserPolicyO const opChangePassword = "ChangePassword" -// ChangePasswordRequest generates a request for the ChangePassword operation. 
+// ChangePasswordRequest generates a "aws/request.Request" representing the +// client's request for the ChangePassword operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangePassword method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangePasswordRequest method. +// req, resp := client.ChangePasswordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ChangePasswordRequest(input *ChangePasswordInput) (req *request.Request, output *ChangePasswordOutput) { op := &request.Operation{ Name: opChangePassword, @@ -255,7 +406,28 @@ func (c *IAM) ChangePassword(input *ChangePasswordInput) (*ChangePasswordOutput, const opCreateAccessKey = "CreateAccessKey" -// CreateAccessKeyRequest generates a request for the CreateAccessKey operation. +// CreateAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccessKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAccessKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAccessKeyRequest method. +// req, resp := client.CreateAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request.Request, output *CreateAccessKeyOutput) { op := &request.Operation{ Name: opCreateAccessKey, @@ -276,7 +448,7 @@ func (c *IAM) CreateAccessKeyRequest(input *CreateAccessKeyInput) (req *request. // Creates a new AWS secret access key and corresponding AWS access key ID for // the specified user. The default status for new keys is Active. // -// If you do not specify a user name, IAM determines the user name implicitly +// If you do not specify a user name, IAM determines the user name implicitly // based on the AWS access key ID signing the request. Because this action works // for access keys under the AWS account, you can use this action to manage // root credentials even if the AWS account has no associated users. @@ -298,7 +470,28 @@ func (c *IAM) CreateAccessKey(input *CreateAccessKeyInput) (*CreateAccessKeyOutp const opCreateAccountAlias = "CreateAccountAlias" -// CreateAccountAliasRequest generates a request for the CreateAccountAlias operation. +// CreateAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAccountAlias operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAccountAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAccountAliasRequest method. +// req, resp := client.CreateAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateAccountAliasRequest(input *CreateAccountAliasInput) (req *request.Request, output *CreateAccountAliasOutput) { op := &request.Operation{ Name: opCreateAccountAlias, @@ -329,7 +522,28 @@ func (c *IAM) CreateAccountAlias(input *CreateAccountAliasInput) (*CreateAccount const opCreateGroup = "CreateGroup" -// CreateGroupRequest generates a request for the CreateGroup operation. +// CreateGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateGroupRequest method. +// req, resp := client.CreateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateGroupRequest(input *CreateGroupInput) (req *request.Request, output *CreateGroupOutput) { op := &request.Operation{ Name: opCreateGroup, @@ -360,7 +574,28 @@ func (c *IAM) CreateGroup(input *CreateGroupInput) (*CreateGroupOutput, error) { const opCreateInstanceProfile = "CreateInstanceProfile" -// CreateInstanceProfileRequest generates a request for the CreateInstanceProfile operation. +// CreateInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceProfileRequest method. 
+// req, resp := client.CreateInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) (req *request.Request, output *CreateInstanceProfileOutput) { op := &request.Operation{ Name: opCreateInstanceProfile, @@ -392,7 +627,28 @@ func (c *IAM) CreateInstanceProfile(input *CreateInstanceProfileInput) (*CreateI const opCreateLoginProfile = "CreateLoginProfile" -// CreateLoginProfileRequest generates a request for the CreateLoginProfile operation. +// CreateLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLoginProfileRequest method. +// req, resp := client.CreateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *request.Request, output *CreateLoginProfileOutput) { op := &request.Operation{ Name: opCreateLoginProfile, @@ -413,7 +669,7 @@ func (c *IAM) CreateLoginProfileRequest(input *CreateLoginProfileInput) (req *re // Creates a password for the specified user, giving the user the ability to // access AWS services through the AWS Management Console. For more information // about managing passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) -// in the Using IAM guide. +// in the IAM User Guide. func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginProfileOutput, error) { req, out := c.CreateLoginProfileRequest(input) err := req.Send() @@ -422,7 +678,28 @@ func (c *IAM) CreateLoginProfile(input *CreateLoginProfileInput) (*CreateLoginPr const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" -// CreateOpenIDConnectProviderRequest generates a request for the CreateOpenIDConnectProvider operation. +// CreateOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the CreateOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOpenIDConnectProviderRequest method. 
+// req, resp := client.CreateOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) (req *request.Request, output *CreateOpenIDConnectProviderOutput) { op := &request.Operation{ Name: opCreateOpenIDConnectProvider, @@ -454,9 +731,9 @@ func (c *IAM) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProvi // that the IdP uses. You get all of this information from the OIDC IdP that // you want to use for access to AWS. // -// Because trust for the OIDC provider is ultimately derived from the IAM provider -// that this action creates, it is a best practice to limit access to the CreateOpenIDConnectProvider -// action to highly-privileged users. +// Because trust for the OIDC provider is ultimately derived from the IAM +// provider that this action creates, it is a best practice to limit access +// to the CreateOpenIDConnectProvider action to highly-privileged users. func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInput) (*CreateOpenIDConnectProviderOutput, error) { req, out := c.CreateOpenIDConnectProviderRequest(input) err := req.Send() @@ -465,7 +742,28 @@ func (c *IAM) CreateOpenIDConnectProvider(input *CreateOpenIDConnectProviderInpu const opCreatePolicy = "CreatePolicy" -// CreatePolicyRequest generates a request for the CreatePolicy operation. +// CreatePolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyRequest method. +// req, resp := client.CreatePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Request, output *CreatePolicyOutput) { op := &request.Operation{ Name: opCreatePolicy, @@ -490,8 +788,8 @@ func (c *IAM) CreatePolicyRequest(input *CreatePolicyInput) (req *request.Reques // versions, see Versioning for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) // in the IAM User Guide. // -// For more information about managed policies in general, refer to Managed -// Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about managed policies in general, see Managed Policies +// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. 
func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error) { req, out := c.CreatePolicyRequest(input) @@ -501,7 +799,28 @@ func (c *IAM) CreatePolicy(input *CreatePolicyInput) (*CreatePolicyOutput, error const opCreatePolicyVersion = "CreatePolicyVersion" -// CreatePolicyVersionRequest generates a request for the CreatePolicyVersion operation. +// CreatePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreatePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePolicyVersionRequest method. +// req, resp := client.CreatePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req *request.Request, output *CreatePolicyVersionOutput) { op := &request.Operation{ Name: opCreatePolicyVersion, @@ -525,9 +844,8 @@ func (c *IAM) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) (req * // version using DeletePolicyVersion before you create a new version. // // Optionally, you can set the new version as the policy's default version. -// The default version is the operative version; that is, the version that is -// in effect for the IAM users, groups, and roles that the policy is attached -// to. +// The default version is the version that is in effect for the IAM users, groups, +// and roles to which the policy is attached. // // For more information about managed policy versions, see Versioning for Managed // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) @@ -540,7 +858,28 @@ func (c *IAM) CreatePolicyVersion(input *CreatePolicyVersionInput) (*CreatePolic const opCreateRole = "CreateRole" -// CreateRoleRequest generates a request for the CreateRole operation. +// CreateRoleRequest generates a "aws/request.Request" representing the +// client's request for the CreateRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateRoleRequest method. 
+// req, resp := client.CreateRoleRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
func (c *IAM) CreateRoleRequest(input *CreateRoleInput) (req *request.Request, output *CreateRoleOutput) {
op := &request.Operation{
Name: opCreateRole,
@@ -571,7 +910,28 @@ func (c *IAM) CreateRole(input *CreateRoleInput) (*CreateRoleOutput, error) {
const opCreateSAMLProvider = "CreateSAMLProvider"
-// CreateSAMLProviderRequest generates a request for the CreateSAMLProvider operation.
+// CreateSAMLProviderRequest generates a "aws/request.Request" representing the
+// client's request for the CreateSAMLProvider operation. The "output" return
+// value can be used to capture response data after the request's "Send" method
+// is called.
+//
+// Creating a request object using this method should be used when you want to inject
+// custom logic into the request's lifecycle using a custom handler, or if you want to
+// access properties on the request object before or after sending the request. If
+// you just want the service response, call the CreateSAMLProvider method directly
+// instead.
+//
+// Note: You must call the "Send" method on the returned request object in order
+// to execute the request.
+//
+// // Example sending a request using the CreateSAMLProviderRequest method.
+// req, resp := client.CreateSAMLProviderRequest(params)
+//
+// err := req.Send()
+// if err == nil { // resp is now filled
+// fmt.Println(resp)
+// }
+//
func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *request.Request, output *CreateSAMLProviderOutput) {
op := &request.Operation{
Name: opCreateSAMLProvider,
@@ -589,25 +949,26 @@ func (c *IAM) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) (req *re
return
}
-// Creates an IAM entity to describe an identity provider (IdP) that supports
+// Creates an IAM resource that describes an identity provider (IdP) that supports
// SAML 2.0.
//
-// The SAML provider that you create with this operation can be used as a
-// principal in a role's trust policy to establish a trust relationship between
-// AWS and a SAML identity provider. You can create an IAM role that supports
-// Web-based single sign-on (SSO) to the AWS Management Console or one that
-// supports API access to AWS.
+// The SAML provider resource that you create with this operation can be used
+// as a principal in an IAM role's trust policy to enable federated users who
+// sign-in using the SAML IdP to assume the role. You can create an IAM role
+// that supports Web-based single sign-on (SSO) to the AWS Management Console
+// or one that supports API access to AWS.
//
-// When you create the SAML provider, you upload an a SAML metadata document
-// that you get from your IdP and that includes the issuer's name, expiration
-// information, and keys that can be used to validate the SAML authentication
-// response (assertions) that are received from the IdP. You must generate the
-// metadata document using the identity management software that is used as
-// your organization's IdP.
+// When you create the SAML provider resource, you upload a SAML metadata
+// document that you get from your IdP and that includes the issuer's name,
+// expiration information, and keys that can be used to validate the SAML authentication
+// response (assertions) that the IdP sends. You must generate the metadata
+// document using the identity management software that is used as your organization's
+// IdP.
// -// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). -// For more information, see Enabling SAML 2.0 Federated Users to Access the -// AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// For more information, see Enabling SAML 2.0 Federated Users to Access +// the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) // and About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) // in the IAM User Guide. func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLProviderOutput, error) { @@ -618,7 +979,28 @@ func (c *IAM) CreateSAMLProvider(input *CreateSAMLProviderInput) (*CreateSAMLPro const opCreateUser = "CreateUser" -// CreateUserRequest generates a request for the CreateUser operation. +// CreateUserRequest generates a "aws/request.Request" representing the +// client's request for the CreateUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUserRequest method. +// req, resp := client.CreateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, output *CreateUserOutput) { op := &request.Operation{ Name: opCreateUser, @@ -636,9 +1018,9 @@ func (c *IAM) CreateUserRequest(input *CreateUserInput) (req *request.Request, o return } -// Creates a new user for your AWS account. +// Creates a new IAM user for your AWS account. // -// For information about limitations on the number of users you can create, +// For information about limitations on the number of IAM users you can create, // see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) // in the IAM User Guide. func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { @@ -649,7 +1031,28 @@ func (c *IAM) CreateUser(input *CreateUserInput) (*CreateUserOutput, error) { const opCreateVirtualMFADevice = "CreateVirtualMFADevice" -// CreateVirtualMFADeviceRequest generates a request for the CreateVirtualMFADevice operation. +// CreateVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the CreateVirtualMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateVirtualMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateVirtualMFADeviceRequest method. +// req, resp := client.CreateVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) (req *request.Request, output *CreateVirtualMFADeviceOutput) { op := &request.Operation{ Name: opCreateVirtualMFADevice, @@ -671,13 +1074,13 @@ func (c *IAM) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) // virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. // For more information about creating and working with virtual MFA devices, // go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) -// in the Using IAM guide. +// in the IAM User Guide. // // For information about limits on the number of MFA devices you can create, // see Limitations on Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) -// in the Using IAM guide. +// in the IAM User Guide. // -// The seed information contained in the QR code and the Base32 string should +// The seed information contained in the QR code and the Base32 string should // be treated like any other secret access information, such as your AWS access // keys or your passwords. After you provision your virtual device, you should // ensure that the information is destroyed following secure procedures. @@ -689,7 +1092,28 @@ func (c *IAM) CreateVirtualMFADevice(input *CreateVirtualMFADeviceInput) (*Creat const opDeactivateMFADevice = "DeactivateMFADevice" -// DeactivateMFADeviceRequest generates a request for the DeactivateMFADevice operation. +// DeactivateMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeactivateMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeactivateMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeactivateMFADeviceRequest method. +// req, resp := client.DeactivateMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req *request.Request, output *DeactivateMFADeviceOutput) { op := &request.Operation{ Name: opDeactivateMFADevice, @@ -714,7 +1138,7 @@ func (c *IAM) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) (req * // // For more information about creating and working with virtual MFA devices, // go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) -// in the Using IAM guide. +// in the IAM User Guide. 
func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateMFADeviceOutput, error) { req, out := c.DeactivateMFADeviceRequest(input) err := req.Send() @@ -723,7 +1147,28 @@ func (c *IAM) DeactivateMFADevice(input *DeactivateMFADeviceInput) (*DeactivateM const opDeleteAccessKey = "DeleteAccessKey" -// DeleteAccessKeyRequest generates a request for the DeleteAccessKey operation. +// DeleteAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccessKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccessKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccessKeyRequest method. +// req, resp := client.DeleteAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request.Request, output *DeleteAccessKeyOutput) { op := &request.Operation{ Name: opDeleteAccessKey, @@ -743,9 +1188,9 @@ func (c *IAM) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) (req *request. return } -// Deletes the access key associated with the specified user. +// Deletes the access key pair associated with the specified IAM user. // -// If you do not specify a user name, IAM determines the user name implicitly +// If you do not specify a user name, IAM determines the user name implicitly // based on the AWS access key ID signing the request. Because this action works // for access keys under the AWS account, you can use this action to manage // root credentials even if the AWS account has no associated users. @@ -757,7 +1202,28 @@ func (c *IAM) DeleteAccessKey(input *DeleteAccessKeyInput) (*DeleteAccessKeyOutp const opDeleteAccountAlias = "DeleteAccountAlias" -// DeleteAccountAliasRequest generates a request for the DeleteAccountAlias operation. +// DeleteAccountAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccountAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccountAliasRequest method. 
+// req, resp := client.DeleteAccountAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) (req *request.Request, output *DeleteAccountAliasOutput) { op := &request.Operation{ Name: opDeleteAccountAlias, @@ -788,7 +1254,28 @@ func (c *IAM) DeleteAccountAlias(input *DeleteAccountAliasInput) (*DeleteAccount const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" -// DeleteAccountPasswordPolicyRequest generates a request for the DeleteAccountPasswordPolicy operation. +// DeleteAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAccountPasswordPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAccountPasswordPolicyRequest method. +// req, resp := client.DeleteAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) (req *request.Request, output *DeleteAccountPasswordPolicyOutput) { op := &request.Operation{ Name: opDeleteAccountPasswordPolicy, @@ -808,7 +1295,7 @@ func (c *IAM) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPol return } -// Deletes the password policy for the AWS account. +// Deletes the password policy for the AWS account. There are no parameters. func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInput) (*DeleteAccountPasswordPolicyOutput, error) { req, out := c.DeleteAccountPasswordPolicyRequest(input) err := req.Send() @@ -817,7 +1304,28 @@ func (c *IAM) DeleteAccountPasswordPolicy(input *DeleteAccountPasswordPolicyInpu const opDeleteGroup = "DeleteGroup" -// DeleteGroupRequest generates a request for the DeleteGroup operation. +// DeleteGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteGroupRequest method. 
+// req, resp := client.DeleteGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, output *DeleteGroupOutput) { op := &request.Operation{ Name: opDeleteGroup, @@ -837,8 +1345,8 @@ func (c *IAM) DeleteGroupRequest(input *DeleteGroupInput) (req *request.Request, return } -// Deletes the specified group. The group must not contain any users or have -// any attached policies. +// Deletes the specified IAM group. The group must not contain any users or +// have any attached policies. func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { req, out := c.DeleteGroupRequest(input) err := req.Send() @@ -847,7 +1355,28 @@ func (c *IAM) DeleteGroup(input *DeleteGroupInput) (*DeleteGroupOutput, error) { const opDeleteGroupPolicy = "DeleteGroupPolicy" -// DeleteGroupPolicyRequest generates a request for the DeleteGroupPolicy operation. +// DeleteGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteGroupPolicyRequest method. +// req, resp := client.DeleteGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *request.Request, output *DeleteGroupPolicyOutput) { op := &request.Operation{ Name: opDeleteGroupPolicy, @@ -867,7 +1396,8 @@ func (c *IAM) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) (req *requ return } -// Deletes the specified inline policy that is embedded in the specified group. +// Deletes the specified inline policy that is embedded in the specified IAM +// group. // // A group can also have managed policies attached to it. To detach a managed // policy from a group, use DetachGroupPolicy. For more information about policies, @@ -881,7 +1411,28 @@ func (c *IAM) DeleteGroupPolicy(input *DeleteGroupPolicyInput) (*DeleteGroupPoli const opDeleteInstanceProfile = "DeleteInstanceProfile" -// DeleteInstanceProfileRequest generates a request for the DeleteInstanceProfile operation. +// DeleteInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInstanceProfile method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInstanceProfileRequest method. +// req, resp := client.DeleteInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (req *request.Request, output *DeleteInstanceProfileOutput) { op := &request.Operation{ Name: opDeleteInstanceProfile, @@ -907,8 +1458,10 @@ func (c *IAM) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) (r // Make sure you do not have any Amazon EC2 instances running with the instance // profile you are about to delete. Deleting a role or instance profile that // is associated with a running instance will break any applications running -// on the instance. For more information about instance profiles, go to About -// Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// on the instance. +// +// For more information about instance profiles, go to About Instance Profiles +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteInstanceProfileOutput, error) { req, out := c.DeleteInstanceProfileRequest(input) err := req.Send() @@ -917,7 +1470,28 @@ func (c *IAM) DeleteInstanceProfile(input *DeleteInstanceProfileInput) (*DeleteI const opDeleteLoginProfile = "DeleteLoginProfile" -// DeleteLoginProfileRequest generates a request for the DeleteLoginProfile operation. +// DeleteLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLoginProfileRequest method. +// req, resp := client.DeleteLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *request.Request, output *DeleteLoginProfileOutput) { op := &request.Operation{ Name: opDeleteLoginProfile, @@ -937,13 +1511,14 @@ func (c *IAM) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) (req *re return } -// Deletes the password for the specified user, which terminates the user's +// Deletes the password for the specified IAM user, which terminates the user's // ability to access AWS services through the AWS Management Console. // -// Deleting a user's password does not prevent a user from accessing IAM through -// the command line interface or the API. To prevent all user access you must -// also either make the access key inactive or delete it. For more information -// about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey. 
+// Deleting a user's password does not prevent a user from accessing AWS +// through the command line interface or the API. To prevent all user access +// you must also either make any access keys inactive or delete them. For more +// information about making keys inactive or deleting them, see UpdateAccessKey +// and DeleteAccessKey. func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginProfileOutput, error) { req, out := c.DeleteLoginProfileRequest(input) err := req.Send() @@ -952,7 +1527,28 @@ func (c *IAM) DeleteLoginProfile(input *DeleteLoginProfileInput) (*DeleteLoginPr const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" -// DeleteOpenIDConnectProviderRequest generates a request for the DeleteOpenIDConnectProvider operation. +// DeleteOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteOpenIDConnectProviderRequest method. +// req, resp := client.DeleteOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) (req *request.Request, output *DeleteOpenIDConnectProviderOutput) { op := &request.Operation{ Name: opDeleteOpenIDConnectProvider, @@ -972,14 +1568,14 @@ func (c *IAM) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProvi return } -// Deletes an IAM OpenID Connect identity provider. +// Deletes an OpenID Connect identity provider (IdP) resource object in IAM. // -// Deleting an OIDC provider does not update any roles that reference the provider -// as a principal in their trust policies. Any attempt to assume a role that -// references a provider that has been deleted will fail. +// Deleting an IAM OIDC provider resource does not update any roles that reference +// the provider as a principal in their trust policies. Any attempt to assume +// a role that references a deleted provider fails. // // This action is idempotent; it does not fail or return an error if you call -// the action for a provider that was already deleted. +// the action for a provider that does not exist. func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInput) (*DeleteOpenIDConnectProviderOutput, error) { req, out := c.DeleteOpenIDConnectProviderRequest(input) err := req.Send() @@ -988,7 +1584,28 @@ func (c *IAM) DeleteOpenIDConnectProvider(input *DeleteOpenIDConnectProviderInpu const opDeletePolicy = "DeletePolicy" -// DeletePolicyRequest generates a request for the DeletePolicy operation. +// DeletePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyRequest method. +// req, resp := client.DeletePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Request, output *DeletePolicyOutput) { op := &request.Operation{ Name: opDeletePolicy, @@ -1010,20 +1627,25 @@ func (c *IAM) DeletePolicyRequest(input *DeletePolicyInput) (req *request.Reques // Deletes the specified managed policy. // -// Before you can delete a managed policy, you must detach the policy from -// all users, groups, and roles that it is attached to, and you must delete +// Before you can delete a managed policy, you must first detach the policy +// from all users, groups, and roles that it is attached to, and you must delete // all of the policy's versions. The following steps describe the process for -// deleting a managed policy: Detach the policy from all users, groups, and -// roles that the policy is attached to, using the DetachUserPolicy, DetachGroupPolicy, -// or DetachRolePolicy APIs. To list all the users, groups, and roles that a -// policy is attached to, use ListEntitiesForPolicy. Delete all versions of -// the policy using DeletePolicyVersion. To list the policy's versions, use -// ListPolicyVersions. You cannot use DeletePolicyVersion to delete the version -// that is marked as the default version. You delete the policy's default version -// in the next step of the process. Delete the policy (this automatically deletes -// the policy's default version) using this API. +// deleting a managed policy: // -// For information about managed policies, refer to Managed Policies and Inline +// Detach the policy from all users, groups, and roles that the policy is +// attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy +// APIs. To list all the users, groups, and roles that a policy is attached +// to, use ListEntitiesForPolicy. +// +// Delete all versions of the policy using DeletePolicyVersion. To list the +// policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion +// to delete the version that is marked as the default version. You delete the +// policy's default version in the next step of the process. +// +// Delete the policy (this automatically deletes the policy's default version) +// using this API. +// +// For information about managed policies, see Managed Policies and Inline // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error) { @@ -1034,7 +1656,28 @@ func (c *IAM) DeletePolicy(input *DeletePolicyInput) (*DeletePolicyOutput, error const opDeletePolicyVersion = "DeletePolicyVersion" -// DeletePolicyVersionRequest generates a request for the DeletePolicyVersion operation. 
+// DeletePolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the DeletePolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePolicyVersionRequest method. +// req, resp := client.DeletePolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req *request.Request, output *DeletePolicyVersionOutput) { op := &request.Operation{ Name: opDeletePolicyVersion, @@ -1054,14 +1697,14 @@ func (c *IAM) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) (req * return } -// Deletes the specified version of the specified managed policy. +// Deletes the specified version from the specified managed policy. // -// You cannot delete the default version of a policy using this API. To delete -// the default version of a policy, use DeletePolicy. To find out which version +// You cannot delete the default version from a policy using this API. To delete +// the default version from a policy, use DeletePolicy. To find out which version // of a policy is marked as the default version, use ListPolicyVersions. // -// For information about versions for managed policies, refer to Versioning -// for Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// For information about versions for managed policies, see Versioning for +// Managed Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) // in the IAM User Guide. func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolicyVersionOutput, error) { req, out := c.DeletePolicyVersionRequest(input) @@ -1071,7 +1714,28 @@ func (c *IAM) DeletePolicyVersion(input *DeletePolicyVersionInput) (*DeletePolic const opDeleteRole = "DeleteRole" -// DeleteRoleRequest generates a request for the DeleteRole operation. +// DeleteRoleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRoleRequest method. 
+// req, resp := client.DeleteRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, output *DeleteRoleOutput) { op := &request.Operation{ Name: opDeleteRole, @@ -1094,7 +1758,7 @@ func (c *IAM) DeleteRoleRequest(input *DeleteRoleInput) (req *request.Request, o // Deletes the specified role. The role must not have any policies attached. // For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). // -// Make sure you do not have any Amazon EC2 instances running with the role +// Make sure you do not have any Amazon EC2 instances running with the role // you are about to delete. Deleting a role or instance profile that is associated // with a running instance will break any applications running on the instance. func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { @@ -1105,7 +1769,28 @@ func (c *IAM) DeleteRole(input *DeleteRoleInput) (*DeleteRoleOutput, error) { const opDeleteRolePolicy = "DeleteRolePolicy" -// DeleteRolePolicyRequest generates a request for the DeleteRolePolicy operation. +// DeleteRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteRolePolicyRequest method. +// req, resp := client.DeleteRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *request.Request, output *DeleteRolePolicyOutput) { op := &request.Operation{ Name: opDeleteRolePolicy, @@ -1125,7 +1810,8 @@ func (c *IAM) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) (req *reques return } -// Deletes the specified inline policy that is embedded in the specified role. +// Deletes the specified inline policy that is embedded in the specified IAM +// role. // // A role can also have managed policies attached to it. To detach a managed // policy from a role, use DetachRolePolicy. For more information about policies, @@ -1139,7 +1825,28 @@ func (c *IAM) DeleteRolePolicy(input *DeleteRolePolicyInput) (*DeleteRolePolicyO const opDeleteSAMLProvider = "DeleteSAMLProvider" -// DeleteSAMLProviderRequest generates a request for the DeleteSAMLProvider operation. +// DeleteSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSAMLProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSAMLProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSAMLProviderRequest method. +// req, resp := client.DeleteSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *request.Request, output *DeleteSAMLProviderOutput) { op := &request.Operation{ Name: opDeleteSAMLProvider, @@ -1159,13 +1866,14 @@ func (c *IAM) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) (req *re return } -// Deletes a SAML provider. +// Deletes a SAML provider resource in IAM. // -// Deleting the provider does not update any roles that reference the SAML -// provider as a principal in their trust policies. Any attempt to assume a -// role that references a SAML provider that has been deleted will fail. +// Deleting the provider resource from IAM does not update any roles that reference +// the SAML provider resource's ARN as a principal in their trust policies. +// Any attempt to assume a role that references a non-existent provider resource +// ARN fails. // -// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLProviderOutput, error) { req, out := c.DeleteSAMLProviderRequest(input) err := req.Send() @@ -1174,7 +1882,28 @@ func (c *IAM) DeleteSAMLProvider(input *DeleteSAMLProviderInput) (*DeleteSAMLPro const opDeleteSSHPublicKey = "DeleteSSHPublicKey" -// DeleteSSHPublicKeyRequest generates a request for the DeleteSSHPublicKey operation. +// DeleteSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSSHPublicKeyRequest method. 
+// req, resp := client.DeleteSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) (req *request.Request, output *DeleteSSHPublicKeyOutput) { op := &request.Operation{ Name: opDeleteSSHPublicKey, @@ -1209,7 +1938,28 @@ func (c *IAM) DeleteSSHPublicKey(input *DeleteSSHPublicKeyInput) (*DeleteSSHPubl const opDeleteServerCertificate = "DeleteServerCertificate" -// DeleteServerCertificateRequest generates a request for the DeleteServerCertificate operation. +// DeleteServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteServerCertificateRequest method. +// req, resp := client.DeleteServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) (req *request.Request, output *DeleteServerCertificateOutput) { op := &request.Operation{ Name: opDeleteServerCertificate, @@ -1236,7 +1986,7 @@ func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput // with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) // in the IAM User Guide. // -// If you are using a server certificate with Elastic Load Balancing, deleting +// If you are using a server certificate with Elastic Load Balancing, deleting // the certificate could have implications for your application. If Elastic // Load Balancing doesn't detect the deletion of bound certificates, it may // continue to use the certificates. This could cause Elastic Load Balancing @@ -1253,7 +2003,28 @@ func (c *IAM) DeleteServerCertificate(input *DeleteServerCertificateInput) (*Del const opDeleteSigningCertificate = "DeleteSigningCertificate" -// DeleteSigningCertificateRequest generates a request for the DeleteSigningCertificate operation. +// DeleteSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSigningCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteSigningCertificateRequest method. +// req, resp := client.DeleteSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) (req *request.Request, output *DeleteSigningCertificateOutput) { op := &request.Operation{ Name: opDeleteSigningCertificate, @@ -1273,12 +2044,12 @@ func (c *IAM) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInp return } -// Deletes the specified signing certificate associated with the specified user. +// Deletes a signing certificate associated with the specified IAM user. // // If you do not specify a user name, IAM determines the user name implicitly // based on the AWS access key ID signing the request. Because this action works // for access keys under the AWS account, you can use this action to manage -// root credentials even if the AWS account has no associated users. +// root credentials even if the AWS account has no associated IAM users. func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*DeleteSigningCertificateOutput, error) { req, out := c.DeleteSigningCertificateRequest(input) err := req.Send() @@ -1287,7 +2058,28 @@ func (c *IAM) DeleteSigningCertificate(input *DeleteSigningCertificateInput) (*D const opDeleteUser = "DeleteUser" -// DeleteUserRequest generates a request for the DeleteUser operation. +// DeleteUserRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserRequest method. +// req, resp := client.DeleteUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, output *DeleteUserOutput) { op := &request.Operation{ Name: opDeleteUser, @@ -1307,8 +2099,8 @@ func (c *IAM) DeleteUserRequest(input *DeleteUserInput) (req *request.Request, o return } -// Deletes the specified user. The user must not belong to any groups, have -// any keys or signing certificates, or have any attached policies. +// Deletes the specified IAM user. The user must not belong to any groups or +// have any access keys, signing certificates, or attached policies. func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { req, out := c.DeleteUserRequest(input) err := req.Send() @@ -1317,7 +2109,28 @@ func (c *IAM) DeleteUser(input *DeleteUserInput) (*DeleteUserOutput, error) { const opDeleteUserPolicy = "DeleteUserPolicy" -// DeleteUserPolicyRequest generates a request for the DeleteUserPolicy operation. +// DeleteUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserPolicyRequest method. +// req, resp := client.DeleteUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *request.Request, output *DeleteUserPolicyOutput) { op := &request.Operation{ Name: opDeleteUserPolicy, @@ -1337,7 +2150,8 @@ func (c *IAM) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) (req *reques return } -// Deletes the specified inline policy that is embedded in the specified user. +// Deletes the specified inline policy that is embedded in the specified IAM +// user. // // A user can also have managed policies attached to it. To detach a managed // policy from a user, use DetachUserPolicy. For more information about policies, @@ -1351,7 +2165,28 @@ func (c *IAM) DeleteUserPolicy(input *DeleteUserPolicyInput) (*DeleteUserPolicyO const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice" -// DeleteVirtualMFADeviceRequest generates a request for the DeleteVirtualMFADevice operation. +// DeleteVirtualMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVirtualMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVirtualMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVirtualMFADeviceRequest method. +// req, resp := client.DeleteVirtualMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) (req *request.Request, output *DeleteVirtualMFADeviceOutput) { op := &request.Operation{ Name: opDeleteVirtualMFADevice, @@ -1373,8 +2208,8 @@ func (c *IAM) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) // Deletes a virtual MFA device. // -// You must deactivate a user's virtual MFA device before you can delete it. -// For information about deactivating MFA devices, see DeactivateMFADevice. +// You must deactivate a user's virtual MFA device before you can delete +// it. For information about deactivating MFA devices, see DeactivateMFADevice. 
func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*DeleteVirtualMFADeviceOutput, error) { req, out := c.DeleteVirtualMFADeviceRequest(input) err := req.Send() @@ -1383,7 +2218,28 @@ func (c *IAM) DeleteVirtualMFADevice(input *DeleteVirtualMFADeviceInput) (*Delet const opDetachGroupPolicy = "DetachGroupPolicy" -// DetachGroupPolicyRequest generates a request for the DetachGroupPolicy operation. +// DetachGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachGroupPolicyRequest method. +// req, resp := client.DetachGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *request.Request, output *DetachGroupPolicyOutput) { op := &request.Operation{ Name: opDetachGroupPolicy, @@ -1403,11 +2259,11 @@ func (c *IAM) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) (req *requ return } -// Removes the specified managed policy from the specified group. +// Removes the specified managed policy from the specified IAM group. // // A group can also have inline policies embedded with it. To delete an inline -// policy, use the DeleteGroupPolicy API. For information about policies, refer -// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// policy, use the DeleteGroupPolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPolicyOutput, error) { req, out := c.DetachGroupPolicyRequest(input) @@ -1417,7 +2273,28 @@ func (c *IAM) DetachGroupPolicy(input *DetachGroupPolicyInput) (*DetachGroupPoli const opDetachRolePolicy = "DetachRolePolicy" -// DetachRolePolicyRequest generates a request for the DetachRolePolicy operation. +// DetachRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachRolePolicyRequest method. 
+// req, resp := client.DetachRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *request.Request, output *DetachRolePolicyOutput) { op := &request.Operation{ Name: opDetachRolePolicy, @@ -1440,8 +2317,8 @@ func (c *IAM) DetachRolePolicyRequest(input *DetachRolePolicyInput) (req *reques // Removes the specified managed policy from the specified role. // // A role can also have inline policies embedded with it. To delete an inline -// policy, use the DeleteRolePolicy API. For information about policies, refer -// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// policy, use the DeleteRolePolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyOutput, error) { req, out := c.DetachRolePolicyRequest(input) @@ -1451,7 +2328,28 @@ func (c *IAM) DetachRolePolicy(input *DetachRolePolicyInput) (*DetachRolePolicyO const opDetachUserPolicy = "DetachUserPolicy" -// DetachUserPolicyRequest generates a request for the DetachUserPolicy operation. +// DetachUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DetachUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachUserPolicyRequest method. +// req, resp := client.DetachUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *request.Request, output *DetachUserPolicyOutput) { op := &request.Operation{ Name: opDetachUserPolicy, @@ -1474,8 +2372,8 @@ func (c *IAM) DetachUserPolicyRequest(input *DetachUserPolicyInput) (req *reques // Removes the specified managed policy from the specified user. // // A user can also have inline policies embedded with it. To delete an inline -// policy, use the DeleteUserPolicy API. For information about policies, refer -// to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// policy, use the DeleteUserPolicy API. For information about policies, see +// Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. 
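The DeletePolicy documentation earlier in this patch lists the full sequence for removing a managed policy: detach it from every user, group, and role, delete all non-default versions, then delete the policy itself. A sketch of that sequence, assuming aws-sdk-go v1, a placeholder policy ARN, and result sets small enough that pagination of ListEntitiesForPolicy and ListPolicyVersions can be omitted:

package main

import (
    "log"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/iam"
)

func deleteManagedPolicy(svc *iam.IAM, policyArn string) error {
    arn := aws.String(policyArn)

    // Step 1: detach the policy from every user, group, and role it is attached to.
    entities, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{PolicyArn: arn})
    if err != nil {
        return err
    }
    for _, u := range entities.PolicyUsers {
        if _, err := svc.DetachUserPolicy(&iam.DetachUserPolicyInput{PolicyArn: arn, UserName: u.UserName}); err != nil {
            return err
        }
    }
    for _, g := range entities.PolicyGroups {
        if _, err := svc.DetachGroupPolicy(&iam.DetachGroupPolicyInput{PolicyArn: arn, GroupName: g.GroupName}); err != nil {
            return err
        }
    }
    for _, r := range entities.PolicyRoles {
        if _, err := svc.DetachRolePolicy(&iam.DetachRolePolicyInput{PolicyArn: arn, RoleName: r.RoleName}); err != nil {
            return err
        }
    }

    // Step 2: delete every non-default version; the default version cannot be
    // removed with DeletePolicyVersion.
    versions, err := svc.ListPolicyVersions(&iam.ListPolicyVersionsInput{PolicyArn: arn})
    if err != nil {
        return err
    }
    for _, v := range versions.Versions {
        if aws.BoolValue(v.IsDefaultVersion) {
            continue
        }
        if _, err := svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{PolicyArn: arn, VersionId: v.VersionId}); err != nil {
            return err
        }
    }

    // Step 3: delete the policy, which also deletes its default version.
    _, err = svc.DeletePolicy(&iam.DeletePolicyInput{PolicyArn: arn})
    return err
}

func main() {
    svc := iam.New(session.Must(session.NewSession()))
    if err := deleteManagedPolicy(svc, "arn:aws:iam::123456789012:policy/example"); err != nil { // placeholder ARN
        log.Fatal(err)
    }
}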
func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyOutput, error) { req, out := c.DetachUserPolicyRequest(input) @@ -1485,7 +2383,28 @@ func (c *IAM) DetachUserPolicy(input *DetachUserPolicyInput) (*DetachUserPolicyO const opEnableMFADevice = "EnableMFADevice" -// EnableMFADeviceRequest generates a request for the EnableMFADevice operation. +// EnableMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the EnableMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableMFADeviceRequest method. +// req, resp := client.EnableMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request.Request, output *EnableMFADeviceOutput) { op := &request.Operation{ Name: opEnableMFADevice, @@ -1505,9 +2424,9 @@ func (c *IAM) EnableMFADeviceRequest(input *EnableMFADeviceInput) (req *request. return } -// Enables the specified MFA device and associates it with the specified user -// name. When enabled, the MFA device is required for every subsequent login -// by the user name associated with the device. +// Enables the specified MFA device and associates it with the specified IAM +// user. When enabled, the MFA device is required for every subsequent login +// by the IAM user associated with the device. func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutput, error) { req, out := c.EnableMFADeviceRequest(input) err := req.Send() @@ -1516,7 +2435,28 @@ func (c *IAM) EnableMFADevice(input *EnableMFADeviceInput) (*EnableMFADeviceOutp const opGenerateCredentialReport = "GenerateCredentialReport" -// GenerateCredentialReportRequest generates a request for the GenerateCredentialReport operation. +// GenerateCredentialReportRequest generates a "aws/request.Request" representing the +// client's request for the GenerateCredentialReport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateCredentialReport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateCredentialReportRequest method. 
+// req, resp := client.GenerateCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) (req *request.Request, output *GenerateCredentialReportOutput) { op := &request.Operation{ Name: opGenerateCredentialReport, @@ -1545,7 +2485,28 @@ func (c *IAM) GenerateCredentialReport(input *GenerateCredentialReportInput) (*G const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" -// GetAccessKeyLastUsedRequest generates a request for the GetAccessKeyLastUsed operation. +// GetAccessKeyLastUsedRequest generates a "aws/request.Request" representing the +// client's request for the GetAccessKeyLastUsed operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccessKeyLastUsed method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccessKeyLastUsedRequest method. +// req, resp := client.GetAccessKeyLastUsedRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) (req *request.Request, output *GetAccessKeyLastUsedOutput) { op := &request.Operation{ Name: opGetAccessKeyLastUsed, @@ -1575,7 +2536,28 @@ func (c *IAM) GetAccessKeyLastUsed(input *GetAccessKeyLastUsedInput) (*GetAccess const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" -// GetAccountAuthorizationDetailsRequest generates a request for the GetAccountAuthorizationDetails operation. +// GetAccountAuthorizationDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountAuthorizationDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountAuthorizationDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountAuthorizationDetailsRequest method. 
+// req, resp := client.GetAccountAuthorizationDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) (req *request.Request, output *GetAccountAuthorizationDetailsOutput) { op := &request.Operation{ Name: opGetAccountAuthorizationDetails, @@ -1600,9 +2582,9 @@ func (c *IAM) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizati } // Retrieves information about all IAM users, groups, roles, and policies in -// your account, including their relationships to one another. Use this API -// to obtain a snapshot of the configuration of IAM permissions (users, groups, -// roles, and policies) in your account. +// your AWS account, including their relationships to one another. Use this +// API to obtain a snapshot of the configuration of IAM permissions (users, +// groups, roles, and policies) in your account. // // You can optionally filter the results using the Filter parameter. You can // paginate the results using the MaxItems and Marker parameters. @@ -1612,6 +2594,23 @@ func (c *IAM) GetAccountAuthorizationDetails(input *GetAccountAuthorizationDetai return out, err } +// GetAccountAuthorizationDetailsPages iterates over the pages of a GetAccountAuthorizationDetails operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetAccountAuthorizationDetails method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetAccountAuthorizationDetails operation. +// pageNum := 0 +// err := client.GetAccountAuthorizationDetailsPages(params, +// func(page *GetAccountAuthorizationDetailsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorizationDetailsInput, fn func(p *GetAccountAuthorizationDetailsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetAccountAuthorizationDetailsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1622,7 +2621,28 @@ func (c *IAM) GetAccountAuthorizationDetailsPages(input *GetAccountAuthorization const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" -// GetAccountPasswordPolicyRequest generates a request for the GetAccountPasswordPolicy operation. +// GetAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountPasswordPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountPasswordPolicyRequest method. 
+// req, resp := client.GetAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) (req *request.Request, output *GetAccountPasswordPolicyOutput) { op := &request.Operation{ Name: opGetAccountPasswordPolicy, @@ -1650,7 +2670,28 @@ func (c *IAM) GetAccountPasswordPolicy(input *GetAccountPasswordPolicyInput) (*G const opGetAccountSummary = "GetAccountSummary" -// GetAccountSummaryRequest generates a request for the GetAccountSummary operation. +// GetAccountSummaryRequest generates a "aws/request.Request" representing the +// client's request for the GetAccountSummary operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAccountSummary method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAccountSummaryRequest method. +// req, resp := client.GetAccountSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetAccountSummaryRequest(input *GetAccountSummaryInput) (req *request.Request, output *GetAccountSummaryOutput) { op := &request.Operation{ Name: opGetAccountSummary, @@ -1681,7 +2722,28 @@ func (c *IAM) GetAccountSummary(input *GetAccountSummaryInput) (*GetAccountSumma const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy" -// GetContextKeysForCustomPolicyRequest generates a request for the GetContextKeysForCustomPolicy operation. +// GetContextKeysForCustomPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetContextKeysForCustomPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetContextKeysForCustomPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetContextKeysForCustomPolicyRequest method. +// req, resp := client.GetContextKeysForCustomPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { op := &request.Operation{ Name: opGetContextKeysForCustomPolicy, @@ -1699,14 +2761,13 @@ func (c *IAM) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCusto return } -// Gets a list of all of the context keys referenced in Condition elements in -// the input policies. 
The policies are supplied as a list of one or more strings. -// To get the context keys from policies associated with an IAM user, group, -// or role, use GetContextKeysForPrincipalPolicy. +// Gets a list of all of the context keys referenced in the input policies. +// The policies are supplied as a list of one or more strings. To get the context +// keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy. // // Context keys are variables maintained by AWS and its services that provide // details about the context of an API query request, and can be evaluated by -// using the Condition element of an IAM policy. Use GetContextKeysForCustomPolicy +// testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy // to understand what key names and values you must supply when you call SimulateCustomPolicy. // Note that all parameters are shown in unencoded form here for clarity, but // must be URL encoded to be included as a part of a real HTML request. @@ -1718,7 +2779,28 @@ func (c *IAM) GetContextKeysForCustomPolicy(input *GetContextKeysForCustomPolicy const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy" -// GetContextKeysForPrincipalPolicyRequest generates a request for the GetContextKeysForPrincipalPolicy operation. +// GetContextKeysForPrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetContextKeysForPrincipalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetContextKeysForPrincipalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetContextKeysForPrincipalPolicyRequest method. +// req, resp := client.GetContextKeysForPrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) (req *request.Request, output *GetContextKeysForPolicyResponse) { op := &request.Operation{ Name: opGetContextKeysForPrincipalPolicy, @@ -1736,23 +2818,22 @@ func (c *IAM) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPr return } -// Gets a list of all of the context keys referenced in Condition elements in -// all of the IAM policies attached to the specified IAM entity. The entity -// can be an IAM user, group, or role. If you specify a user, then the request -// also includes all of the policies attached to groups that the user is a member -// of. +// Gets a list of all of the context keys referenced in all of the IAM policies +// attached to the specified IAM entity. The entity can be an IAM user, group, +// or role. If you specify a user, then the request also includes all of the +// policies attached to groups that the user is a member of. // // You can optionally include a list of one or more additional policies, specified // as strings. If you want to include only a list of policies by string, use // GetContextKeysForCustomPolicy instead. 
// -// Note: This API discloses information about the permissions granted to other +// Note: This API discloses information about the permissions granted to other // users. If you do not want users to see other user's permissions, then consider // allowing them to use GetContextKeysForCustomPolicy instead. // // Context keys are variables maintained by AWS and its services that provide // details about the context of an API query request, and can be evaluated by -// using the Condition element of an IAM policy. Use GetContextKeysForPrincipalPolicy +// testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy // to understand what key names and values you must supply when you call SimulatePrincipalPolicy. func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipalPolicyInput) (*GetContextKeysForPolicyResponse, error) { req, out := c.GetContextKeysForPrincipalPolicyRequest(input) @@ -1762,7 +2843,28 @@ func (c *IAM) GetContextKeysForPrincipalPolicy(input *GetContextKeysForPrincipal const opGetCredentialReport = "GetCredentialReport" -// GetCredentialReportRequest generates a request for the GetCredentialReport operation. +// GetCredentialReportRequest generates a "aws/request.Request" representing the +// client's request for the GetCredentialReport operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCredentialReport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCredentialReportRequest method. +// req, resp := client.GetCredentialReportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetCredentialReportRequest(input *GetCredentialReportInput) (req *request.Request, output *GetCredentialReportOutput) { op := &request.Operation{ Name: opGetCredentialReport, @@ -1791,7 +2893,28 @@ func (c *IAM) GetCredentialReport(input *GetCredentialReportInput) (*GetCredenti const opGetGroup = "GetGroup" -// GetGroupRequest generates a request for the GetGroup operation. +// GetGroupRequest generates a "aws/request.Request" representing the +// client's request for the GetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGroupRequest method. 
+// req, resp := client.GetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, output *GetGroupOutput) { op := &request.Operation{ Name: opGetGroup, @@ -1815,14 +2938,31 @@ func (c *IAM) GetGroupRequest(input *GetGroupInput) (req *request.Request, outpu return } -// Returns a list of users that are in the specified group. You can paginate -// the results using the MaxItems and Marker parameters. +// Returns a list of IAM users that are in the specified IAM group. You can +// paginate the results using the MaxItems and Marker parameters. func (c *IAM) GetGroup(input *GetGroupInput) (*GetGroupOutput, error) { req, out := c.GetGroupRequest(input) err := req.Send() return out, err } +// GetGroupPages iterates over the pages of a GetGroup operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See GetGroup method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a GetGroup operation. +// pageNum := 0 +// err := client.GetGroupPages(params, +// func(page *GetGroupOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(p *GetGroupOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.GetGroupRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1833,7 +2973,28 @@ func (c *IAM) GetGroupPages(input *GetGroupInput, fn func(p *GetGroupOutput, las const opGetGroupPolicy = "GetGroupPolicy" -// GetGroupPolicyRequest generates a request for the GetGroupPolicy operation. +// GetGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGroupPolicyRequest method. +// req, resp := client.GetGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Request, output *GetGroupPolicyOutput) { op := &request.Operation{ Name: opGetGroupPolicy, @@ -1852,15 +3013,21 @@ func (c *IAM) GetGroupPolicyRequest(input *GetGroupPolicyInput) (req *request.Re } // Retrieves the specified inline policy document that is embedded in the specified -// group. +// IAM group. // -// A group can also have managed policies attached to it. 
To retrieve a managed -// policy document that is attached to a group, use GetPolicy to determine the -// policy's default version, then use GetPolicyVersion to retrieve the policy +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM group can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a group, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy // document. // -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, error) { req, out := c.GetGroupPolicyRequest(input) @@ -1870,7 +3037,28 @@ func (c *IAM) GetGroupPolicy(input *GetGroupPolicyInput) (*GetGroupPolicyOutput, const opGetInstanceProfile = "GetInstanceProfile" -// GetInstanceProfileRequest generates a request for the GetInstanceProfile operation. +// GetInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetInstanceProfileRequest method. +// req, resp := client.GetInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *request.Request, output *GetInstanceProfileOutput) { op := &request.Operation{ Name: opGetInstanceProfile, @@ -1890,8 +3078,8 @@ func (c *IAM) GetInstanceProfileRequest(input *GetInstanceProfileInput) (req *re // Retrieves information about the specified instance profile, including the // instance profile's path, GUID, ARN, and role. For more information about -// instance profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). -// For more information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). +// instance profiles, see About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html) +// in the IAM User Guide. 
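// Editor's aside (not part of the upstream patch): the GetGroupPolicy notes above say the inline
// policy document comes back URL-encoded per RFC 3986. A minimal sketch of decoding it, assuming
// an already-configured *iam.IAM client, hypothetical group/policy names, and the net/url, aws,
// and iam packages imported:
func exampleGroupPolicyJSON(svc *iam.IAM) (string, error) {
	out, err := svc.GetGroupPolicy(&iam.GetGroupPolicyInput{
		GroupName:  aws.String("Developers"),  // hypothetical group
		PolicyName: aws.String("DevS3Access"), // hypothetical inline policy name
	})
	if err != nil {
		return "", err
	}
	// net/url's QueryUnescape turns the RFC 3986-encoded document back into plain JSON text.
	return url.QueryUnescape(aws.StringValue(out.PolicyDocument))
}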
func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstanceProfileOutput, error) { req, out := c.GetInstanceProfileRequest(input) err := req.Send() @@ -1900,7 +3088,28 @@ func (c *IAM) GetInstanceProfile(input *GetInstanceProfileInput) (*GetInstancePr const opGetLoginProfile = "GetLoginProfile" -// GetLoginProfileRequest generates a request for the GetLoginProfile operation. +// GetLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the GetLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetLoginProfileRequest method. +// req, resp := client.GetLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request.Request, output *GetLoginProfileOutput) { op := &request.Operation{ Name: opGetLoginProfile, @@ -1918,9 +3127,9 @@ func (c *IAM) GetLoginProfileRequest(input *GetLoginProfileInput) (req *request. return } -// Retrieves the user name and password-creation date for the specified user. -// If the user has not been assigned a password, the action returns a 404 (NoSuchEntity) -// error. +// Retrieves the user name and password-creation date for the specified IAM +// user. If the user has not been assigned a password, the action returns a +// 404 (NoSuchEntity) error. func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutput, error) { req, out := c.GetLoginProfileRequest(input) err := req.Send() @@ -1929,7 +3138,28 @@ func (c *IAM) GetLoginProfile(input *GetLoginProfileInput) (*GetLoginProfileOutp const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" -// GetOpenIDConnectProviderRequest generates a request for the GetOpenIDConnectProvider operation. +// GetOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetOpenIDConnectProviderRequest method. 
+// req, resp := client.GetOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) (req *request.Request, output *GetOpenIDConnectProviderOutput) { op := &request.Operation{ Name: opGetOpenIDConnectProvider, @@ -1947,7 +3177,8 @@ func (c *IAM) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInp return } -// Returns information about the specified OpenID Connect provider. +// Returns information about the specified OpenID Connect (OIDC) provider resource +// object in IAM. func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*GetOpenIDConnectProviderOutput, error) { req, out := c.GetOpenIDConnectProviderRequest(input) err := req.Send() @@ -1956,7 +3187,28 @@ func (c *IAM) GetOpenIDConnectProvider(input *GetOpenIDConnectProviderInput) (*G const opGetPolicy = "GetPolicy" -// GetPolicyRequest generates a request for the GetPolicy operation. +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { op := &request.Operation{ Name: opGetPolicy, @@ -1975,18 +3227,18 @@ func (c *IAM) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, out } // Retrieves information about the specified managed policy, including the policy's -// default version and the total number of users, groups, and roles that the -// policy is attached to. For a list of the specific users, groups, and roles -// that the policy is attached to, use the ListEntitiesForPolicy API. This API -// returns metadata about the policy. To retrieve the policy document for a -// specific version of the policy, use GetPolicyVersion. +// default version and the total number of IAM users, groups, and roles to which +// the policy is attached. To retrieve the list of the specific users, groups, +// and roles that the policy is attached to, use the ListEntitiesForPolicy API. +// This API returns metadata about the policy. To retrieve the actual policy +// document for a specific version of the policy, use GetPolicyVersion. // // This API retrieves information about managed policies. To retrieve information -// about an inline policy that is embedded with a user, group, or role, use -// the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// about an inline policy that is embedded with an IAM user, group, or role, +// use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. 
// -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { req, out := c.GetPolicyRequest(input) @@ -1996,7 +3248,28 @@ func (c *IAM) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { const opGetPolicyVersion = "GetPolicyVersion" -// GetPolicyVersionRequest generates a request for the GetPolicyVersion operation. +// GetPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyVersionRequest method. +// req, resp := client.GetPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *request.Request, output *GetPolicyVersionOutput) { op := &request.Operation{ Name: opGetPolicyVersion, @@ -2017,14 +3290,24 @@ func (c *IAM) GetPolicyVersionRequest(input *GetPolicyVersionInput) (req *reques // Retrieves information about the specified version of the specified managed // policy, including the policy document. // -// To list the available versions for a policy, use ListPolicyVersions. +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// To list the available versions for a policy, use ListPolicyVersions. // // This API retrieves information about managed policies. To retrieve information // about an inline policy that is embedded in a user, group, or role, use the // GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. // -// For more information about the types of policies, refer to Managed Policies -// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about the types of policies, see Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) // in the IAM User Guide. 
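// Editor's aside (not part of the upstream patch): a hedged sketch of the two-step flow the
// comments above describe for managed policies: GetPolicy to learn the default version, then
// GetPolicyVersion for the (URL-encoded) document. Assumes a configured *iam.IAM client and the
// net/url, aws, and iam packages; the caller supplies the policy ARN.
func exampleDefaultPolicyJSON(svc *iam.IAM, policyArn string) (string, error) {
	pol, err := svc.GetPolicy(&iam.GetPolicyInput{PolicyArn: aws.String(policyArn)})
	if err != nil {
		return "", err
	}
	ver, err := svc.GetPolicyVersion(&iam.GetPolicyVersionInput{
		PolicyArn: aws.String(policyArn),
		VersionId: pol.Policy.DefaultVersionId, // e.g. "v1"
	})
	if err != nil {
		return "", err
	}
	// The version's Document is RFC 3986 URL-encoded; decode it back to plain JSON text.
	return url.QueryUnescape(aws.StringValue(ver.PolicyVersion.Document))
}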
func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionOutput, error) { req, out := c.GetPolicyVersionRequest(input) @@ -2034,7 +3317,28 @@ func (c *IAM) GetPolicyVersion(input *GetPolicyVersionInput) (*GetPolicyVersionO const opGetRole = "GetRole" -// GetRoleRequest generates a request for the GetRole operation. +// GetRoleRequest generates a "aws/request.Request" representing the +// client's request for the GetRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRoleRequest method. +// req, resp := client.GetRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output *GetRoleOutput) { op := &request.Operation{ Name: opGetRole, @@ -2053,9 +3357,14 @@ func (c *IAM) GetRoleRequest(input *GetRoleInput) (req *request.Request, output } // Retrieves information about the specified role, including the role's path, -// GUID, ARN, and the policy granting permission to assume the role. For more -// information about ARNs, go to ARNs (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#Identifiers_ARNs). -// For more information about roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// GUID, ARN, and the role's trust policy that grants permission to assume the +// role. For more information about roles, see Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { req, out := c.GetRoleRequest(input) err := req.Send() @@ -2064,7 +3373,28 @@ func (c *IAM) GetRole(input *GetRoleInput) (*GetRoleOutput, error) { const opGetRolePolicy = "GetRolePolicy" -// GetRolePolicyRequest generates a request for the GetRolePolicy operation. +// GetRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRolePolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRolePolicyRequest method. +// req, resp := client.GetRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Request, output *GetRolePolicyOutput) { op := &request.Operation{ Name: opGetRolePolicy, @@ -2083,18 +3413,24 @@ func (c *IAM) GetRolePolicyRequest(input *GetRolePolicyInput) (req *request.Requ } // Retrieves the specified inline policy document that is embedded with the -// specified role. +// specified IAM role. // -// A role can also have managed policies attached to it. To retrieve a managed -// policy document that is attached to a role, use GetPolicy to determine the -// policy's default version, then use GetPolicyVersion to retrieve the policy +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM role can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a role, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy // document. // -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // -// For more information about roles, go to Using Roles to Delegate Permissions +// For more information about roles, see Using Roles to Delegate Permissions // and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, error) { req, out := c.GetRolePolicyRequest(input) @@ -2104,7 +3440,28 @@ func (c *IAM) GetRolePolicy(input *GetRolePolicyInput) (*GetRolePolicyOutput, er const opGetSAMLProvider = "GetSAMLProvider" -// GetSAMLProviderRequest generates a request for the GetSAMLProvider operation. +// GetSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the GetSAMLProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSAMLProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSAMLProviderRequest method. 
+// req, resp := client.GetSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request.Request, output *GetSAMLProviderOutput) { op := &request.Operation{ Name: opGetSAMLProvider, @@ -2122,10 +3479,10 @@ func (c *IAM) GetSAMLProviderRequest(input *GetSAMLProviderInput) (req *request. return } -// Returns the SAML provider metadocument that was uploaded when the provider -// was created or updated. +// Returns the SAML provider metadocument that was uploaded when the IAM SAML +// provider resource object was created or updated. // -// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutput, error) { req, out := c.GetSAMLProviderRequest(input) err := req.Send() @@ -2134,7 +3491,28 @@ func (c *IAM) GetSAMLProvider(input *GetSAMLProviderInput) (*GetSAMLProviderOutp const opGetSSHPublicKey = "GetSSHPublicKey" -// GetSSHPublicKeyRequest generates a request for the GetSSHPublicKey operation. +// GetSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the GetSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSSHPublicKeyRequest method. +// req, resp := client.GetSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) (req *request.Request, output *GetSSHPublicKeyOutput) { op := &request.Operation{ Name: opGetSSHPublicKey, @@ -2167,7 +3545,28 @@ func (c *IAM) GetSSHPublicKey(input *GetSSHPublicKeyInput) (*GetSSHPublicKeyOutp const opGetServerCertificate = "GetServerCertificate" -// GetServerCertificateRequest generates a request for the GetServerCertificate operation. +// GetServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the GetServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetServerCertificateRequest method. 
+// req, resp := client.GetServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req *request.Request, output *GetServerCertificateOutput) { op := &request.Operation{ Name: opGetServerCertificate, @@ -2185,7 +3584,7 @@ func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req return } -// Retrieves information about the specified server certificate. +// Retrieves information about the specified server certificate stored in IAM. // // For more information about working with server certificates, including a // list of AWS services that can use the server certificates that you manage @@ -2199,7 +3598,28 @@ func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServer const opGetUser = "GetUser" -// GetUserRequest generates a request for the GetUser operation. +// GetUserRequest generates a "aws/request.Request" representing the +// client's request for the GetUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserRequest method. +// req, resp := client.GetUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output *GetUserOutput) { op := &request.Operation{ Name: opGetUser, @@ -2217,11 +3637,11 @@ func (c *IAM) GetUserRequest(input *GetUserInput) (req *request.Request, output return } -// Retrieves information about the specified user, including the user's creation -// date, path, unique ID, and ARN. +// Retrieves information about the specified IAM user, including the user's +// creation date, path, unique ID, and ARN. // // If you do not specify a user name, IAM determines the user name implicitly -// based on the AWS access key ID used to sign the request. +// based on the AWS access key ID used to sign the request to this API. func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { req, out := c.GetUserRequest(input) err := req.Send() @@ -2230,7 +3650,28 @@ func (c *IAM) GetUser(input *GetUserInput) (*GetUserOutput, error) { const opGetUserPolicy = "GetUserPolicy" -// GetUserPolicyRequest generates a request for the GetUserPolicy operation. +// GetUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetUserPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetUserPolicyRequest method. +// req, resp := client.GetUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Request, output *GetUserPolicyOutput) { op := &request.Operation{ Name: opGetUserPolicy, @@ -2249,15 +3690,21 @@ func (c *IAM) GetUserPolicyRequest(input *GetUserPolicyInput) (req *request.Requ } // Retrieves the specified inline policy document that is embedded in the specified -// user. +// IAM user. // -// A user can also have managed policies attached to it. To retrieve a managed -// policy document that is attached to a user, use GetPolicy to determine the -// policy's default version, then use GetPolicyVersion to retrieve the policy +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM user can also have managed policies attached to it. To retrieve +// a managed policy document that is attached to a user, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy // document. // -// For more information about policies, refer to Managed Policies and Inline -// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about policies, see Managed Policies and Inline Policies +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, error) { req, out := c.GetUserPolicyRequest(input) @@ -2267,7 +3714,28 @@ func (c *IAM) GetUserPolicy(input *GetUserPolicyInput) (*GetUserPolicyOutput, er const opListAccessKeys = "ListAccessKeys" -// ListAccessKeysRequest generates a request for the ListAccessKeys operation. +// ListAccessKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListAccessKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAccessKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAccessKeysRequest method. 
+// req, resp := client.ListAccessKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Request, output *ListAccessKeysOutput) { op := &request.Operation{ Name: opListAccessKeys, @@ -2292,7 +3760,7 @@ func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Re } // Returns information about the access key IDs associated with the specified -// user. If there are none, the action returns an empty list. +// IAM user. If there are none, the action returns an empty list. // // Although each user is limited to a small number of keys, you can still paginate // the results using the MaxItems and Marker parameters. @@ -2302,7 +3770,7 @@ func (c *IAM) ListAccessKeysRequest(input *ListAccessKeysInput) (req *request.Re // works for access keys under the AWS account, you can use this action to manage // root credentials even if the AWS account has no associated users. // -// To ensure the security of your AWS account, the secret access key is accessible +// To ensure the security of your AWS account, the secret access key is accessible // only during key and user creation. func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, error) { req, out := c.ListAccessKeysRequest(input) @@ -2310,6 +3778,23 @@ func (c *IAM) ListAccessKeys(input *ListAccessKeysInput) (*ListAccessKeysOutput, return out, err } +// ListAccessKeysPages iterates over the pages of a ListAccessKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccessKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccessKeys operation. +// pageNum := 0 +// err := client.ListAccessKeysPages(params, +// func(page *ListAccessKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(p *ListAccessKeysOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAccessKeysRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2320,7 +3805,28 @@ func (c *IAM) ListAccessKeysPages(input *ListAccessKeysInput, fn func(p *ListAcc const opListAccountAliases = "ListAccountAliases" -// ListAccountAliasesRequest generates a request for the ListAccountAliases operation. +// ListAccountAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAccountAliases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAccountAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAccountAliasesRequest method. 
+// req, resp := client.ListAccountAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *request.Request, output *ListAccountAliasesOutput) { op := &request.Operation{ Name: opListAccountAliases, @@ -2344,9 +3850,9 @@ func (c *IAM) ListAccountAliasesRequest(input *ListAccountAliasesInput) (req *re return } -// Lists the account alias associated with the account (Note: you can have only -// one). For information about using an AWS account alias, see Using an Alias -// for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// Lists the account alias associated with the AWS account (Note: you can have +// only one). For information about using an AWS account alias, see Using an +// Alias for Your AWS Account ID (http://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) // in the IAM User Guide. func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAliasesOutput, error) { req, out := c.ListAccountAliasesRequest(input) @@ -2354,6 +3860,23 @@ func (c *IAM) ListAccountAliases(input *ListAccountAliasesInput) (*ListAccountAl return out, err } +// ListAccountAliasesPages iterates over the pages of a ListAccountAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAccountAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAccountAliases operation. +// pageNum := 0 +// err := client.ListAccountAliasesPages(params, +// func(page *ListAccountAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(p *ListAccountAliasesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAccountAliasesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2364,7 +3887,28 @@ func (c *IAM) ListAccountAliasesPages(input *ListAccountAliasesInput, fn func(p const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" -// ListAttachedGroupPoliciesRequest generates a request for the ListAttachedGroupPolicies operation. +// ListAttachedGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedGroupPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAttachedGroupPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAttachedGroupPoliciesRequest method. 
+// req, resp := client.ListAttachedGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) (req *request.Request, output *ListAttachedGroupPoliciesOutput) { op := &request.Operation{ Name: opListAttachedGroupPolicies, @@ -2388,11 +3932,11 @@ func (c *IAM) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesI return } -// Lists all managed policies that are attached to the specified group. +// Lists all managed policies that are attached to the specified IAM group. // -// A group can also have inline policies embedded with it. To list the inline -// policies for a group, use the ListGroupPolicies API. For information about -// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// An IAM group can also have inline policies embedded with it. To list the +// inline policies for a group, use the ListGroupPolicies API. For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // You can paginate the results using the MaxItems and Marker parameters. You @@ -2406,6 +3950,23 @@ func (c *IAM) ListAttachedGroupPolicies(input *ListAttachedGroupPoliciesInput) ( return out, err } +// ListAttachedGroupPoliciesPages iterates over the pages of a ListAttachedGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedGroupPolicies operation. +// pageNum := 0 +// err := client.ListAttachedGroupPoliciesPages(params, +// func(page *ListAttachedGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInput, fn func(p *ListAttachedGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAttachedGroupPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2416,7 +3977,28 @@ func (c *IAM) ListAttachedGroupPoliciesPages(input *ListAttachedGroupPoliciesInp const opListAttachedRolePolicies = "ListAttachedRolePolicies" -// ListAttachedRolePoliciesRequest generates a request for the ListAttachedRolePolicies operation. +// ListAttachedRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedRolePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAttachedRolePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ListAttachedRolePoliciesRequest method. +// req, resp := client.ListAttachedRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) (req *request.Request, output *ListAttachedRolePoliciesOutput) { op := &request.Operation{ Name: opListAttachedRolePolicies, @@ -2440,11 +4022,11 @@ func (c *IAM) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInp return } -// Lists all managed policies that are attached to the specified role. +// Lists all managed policies that are attached to the specified IAM role. // -// A role can also have inline policies embedded with it. To list the inline -// policies for a role, use the ListRolePolicies API. For information about -// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// An IAM role can also have inline policies embedded with it. To list the +// inline policies for a role, use the ListRolePolicies API. For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // You can paginate the results using the MaxItems and Marker parameters. You @@ -2458,6 +4040,23 @@ func (c *IAM) ListAttachedRolePolicies(input *ListAttachedRolePoliciesInput) (*L return out, err } +// ListAttachedRolePoliciesPages iterates over the pages of a ListAttachedRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedRolePolicies operation. +// pageNum := 0 +// err := client.ListAttachedRolePoliciesPages(params, +// func(page *ListAttachedRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput, fn func(p *ListAttachedRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAttachedRolePoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2468,7 +4067,28 @@ func (c *IAM) ListAttachedRolePoliciesPages(input *ListAttachedRolePoliciesInput const opListAttachedUserPolicies = "ListAttachedUserPolicies" -// ListAttachedUserPoliciesRequest generates a request for the ListAttachedUserPolicies operation. +// ListAttachedUserPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListAttachedUserPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAttachedUserPolicies method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAttachedUserPoliciesRequest method. +// req, resp := client.ListAttachedUserPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) (req *request.Request, output *ListAttachedUserPoliciesOutput) { op := &request.Operation{ Name: opListAttachedUserPolicies, @@ -2492,11 +4112,11 @@ func (c *IAM) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInp return } -// Lists all managed policies that are attached to the specified user. +// Lists all managed policies that are attached to the specified IAM user. // -// A user can also have inline policies embedded with it. To list the inline -// policies for a user, use the ListUserPolicies API. For information about -// policies, refer to Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// An IAM user can also have inline policies embedded with it. To list the +// inline policies for a user, use the ListUserPolicies API. For information +// about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // You can paginate the results using the MaxItems and Marker parameters. You @@ -2510,6 +4130,23 @@ func (c *IAM) ListAttachedUserPolicies(input *ListAttachedUserPoliciesInput) (*L return out, err } +// ListAttachedUserPoliciesPages iterates over the pages of a ListAttachedUserPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAttachedUserPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListAttachedUserPolicies operation. +// pageNum := 0 +// err := client.ListAttachedUserPoliciesPages(params, +// func(page *ListAttachedUserPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput, fn func(p *ListAttachedUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAttachedUserPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2520,7 +4157,28 @@ func (c *IAM) ListAttachedUserPoliciesPages(input *ListAttachedUserPoliciesInput const opListEntitiesForPolicy = "ListEntitiesForPolicy" -// ListEntitiesForPolicyRequest generates a request for the ListEntitiesForPolicy operation. +// ListEntitiesForPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListEntitiesForPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEntitiesForPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListEntitiesForPolicyRequest method. +// req, resp := client.ListEntitiesForPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (req *request.Request, output *ListEntitiesForPolicyOutput) { op := &request.Operation{ Name: opListEntitiesForPolicy, @@ -2544,8 +4202,8 @@ func (c *IAM) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) (r return } -// Lists all users, groups, and roles that the specified managed policy is attached -// to. +// Lists all IAM users, groups, and roles that the specified managed policy +// is attached to. // // You can use the optional EntityFilter parameter to limit the results to // a particular type of entity (users, groups, or roles). For example, to list @@ -2559,6 +4217,23 @@ func (c *IAM) ListEntitiesForPolicy(input *ListEntitiesForPolicyInput) (*ListEnt return out, err } +// ListEntitiesForPolicyPages iterates over the pages of a ListEntitiesForPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEntitiesForPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEntitiesForPolicy operation. +// pageNum := 0 +// err := client.ListEntitiesForPolicyPages(params, +// func(page *ListEntitiesForPolicyOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn func(p *ListEntitiesForPolicyOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListEntitiesForPolicyRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2569,7 +4244,28 @@ func (c *IAM) ListEntitiesForPolicyPages(input *ListEntitiesForPolicyInput, fn f const opListGroupPolicies = "ListGroupPolicies" -// ListGroupPoliciesRequest generates a request for the ListGroupPolicies operation. +// ListGroupPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroupPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupPoliciesRequest method. 
+// req, resp := client.ListGroupPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *request.Request, output *ListGroupPoliciesOutput) { op := &request.Operation{ Name: opListGroupPolicies, @@ -2594,11 +4290,11 @@ func (c *IAM) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) (req *requ } // Lists the names of the inline policies that are embedded in the specified -// group. +// IAM group. // -// A group can also have managed policies attached to it. To list the managed -// policies that are attached to a group, use ListAttachedGroupPolicies. For -// more information about policies, refer to Managed Policies and Inline Policies +// An IAM group can also have managed policies attached to it. To list the +// managed policies that are attached to a group, use ListAttachedGroupPolicies. +// For more information about policies, see Managed Policies and Inline Policies // (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // @@ -2611,6 +4307,23 @@ func (c *IAM) ListGroupPolicies(input *ListGroupPoliciesInput) (*ListGroupPolici return out, err } +// ListGroupPoliciesPages iterates over the pages of a ListGroupPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupPolicies operation. +// pageNum := 0 +// err := client.ListGroupPoliciesPages(params, +// func(page *ListGroupPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(p *ListGroupPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListGroupPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2621,7 +4334,28 @@ func (c *IAM) ListGroupPoliciesPages(input *ListGroupPoliciesInput, fn func(p *L const opListGroups = "ListGroups" -// ListGroupsRequest generates a request for the ListGroups operation. +// ListGroupsRequest generates a "aws/request.Request" representing the +// client's request for the ListGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupsRequest method. 
+// req, resp := client.ListGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, output *ListGroupsOutput) { op := &request.Operation{ Name: opListGroups, @@ -2645,7 +4379,7 @@ func (c *IAM) ListGroupsRequest(input *ListGroupsInput) (req *request.Request, o return } -// Lists the groups that have the specified path prefix. +// Lists the IAM groups that have the specified path prefix. // // You can paginate the results using the MaxItems and Marker parameters. func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { @@ -2654,6 +4388,23 @@ func (c *IAM) ListGroups(input *ListGroupsInput) (*ListGroupsOutput, error) { return out, err } +// ListGroupsPages iterates over the pages of a ListGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroups operation. +// pageNum := 0 +// err := client.ListGroupsPages(params, +// func(page *ListGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(p *ListGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2664,7 +4415,28 @@ func (c *IAM) ListGroupsPages(input *ListGroupsInput, fn func(p *ListGroupsOutpu const opListGroupsForUser = "ListGroupsForUser" -// ListGroupsForUserRequest generates a request for the ListGroupsForUser operation. +// ListGroupsForUserRequest generates a "aws/request.Request" representing the +// client's request for the ListGroupsForUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGroupsForUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGroupsForUserRequest method. +// req, resp := client.ListGroupsForUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *request.Request, output *ListGroupsForUserOutput) { op := &request.Operation{ Name: opListGroupsForUser, @@ -2688,7 +4460,7 @@ func (c *IAM) ListGroupsForUserRequest(input *ListGroupsForUserInput) (req *requ return } -// Lists the groups the specified user belongs to. +// Lists the IAM groups that the specified IAM user belongs to. // // You can paginate the results using the MaxItems and Marker parameters. 
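As a rough usage sketch of the callback-driven *Pages helpers added in this hunk (the session setup, user name, and page size below are illustrative assumptions, not taken from this patch), collecting the groups one IAM user belongs to might look like:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        // Assumes credentials and region come from the environment.
        svc := iam.New(session.New())

        var groups []string
        err := svc.ListGroupsForUserPages(
            &iam.ListGroupsForUserInput{
                UserName: aws.String("example-user"), // illustrative value
                MaxItems: aws.Int64(100),             // illustrative page size
            },
            func(page *iam.ListGroupsForUserOutput, lastPage bool) bool {
                for _, g := range page.Groups {
                    groups = append(groups, aws.StringValue(g.GroupName))
                }
                return true // returning false would stop the iteration early
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(groups)
    }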
func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUserOutput, error) { @@ -2697,6 +4469,23 @@ func (c *IAM) ListGroupsForUser(input *ListGroupsForUserInput) (*ListGroupsForUs return out, err } +// ListGroupsForUserPages iterates over the pages of a ListGroupsForUser operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGroupsForUser method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGroupsForUser operation. +// pageNum := 0 +// err := client.ListGroupsForUserPages(params, +// func(page *ListGroupsForUserOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(p *ListGroupsForUserOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListGroupsForUserRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2707,7 +4496,28 @@ func (c *IAM) ListGroupsForUserPages(input *ListGroupsForUserInput, fn func(p *L const opListInstanceProfiles = "ListInstanceProfiles" -// ListInstanceProfilesRequest generates a request for the ListInstanceProfiles operation. +// ListInstanceProfilesRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceProfiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceProfilesRequest method. +// req, resp := client.ListInstanceProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) (req *request.Request, output *ListInstanceProfilesOutput) { op := &request.Operation{ Name: opListInstanceProfiles, @@ -2742,6 +4552,23 @@ func (c *IAM) ListInstanceProfiles(input *ListInstanceProfilesInput) (*ListInsta return out, err } +// ListInstanceProfilesPages iterates over the pages of a ListInstanceProfiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceProfiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfiles operation. 
+// pageNum := 0 +// err := client.ListInstanceProfilesPages(params, +// func(page *ListInstanceProfilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn func(p *ListInstanceProfilesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInstanceProfilesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2752,7 +4579,28 @@ func (c *IAM) ListInstanceProfilesPages(input *ListInstanceProfilesInput, fn fun const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" -// ListInstanceProfilesForRoleRequest generates a request for the ListInstanceProfilesForRole operation. +// ListInstanceProfilesForRoleRequest generates a "aws/request.Request" representing the +// client's request for the ListInstanceProfilesForRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListInstanceProfilesForRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListInstanceProfilesForRoleRequest method. +// req, resp := client.ListInstanceProfilesForRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) (req *request.Request, output *ListInstanceProfilesForRoleOutput) { op := &request.Operation{ Name: opListInstanceProfilesForRole, @@ -2776,9 +4624,9 @@ func (c *IAM) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForR return } -// Lists the instance profiles that have the specified associated role. If there -// are none, the action returns an empty list. For more information about instance -// profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// Lists the instance profiles that have the specified associated IAM role. +// If there are none, the action returns an empty list. For more information +// about instance profiles, go to About Instance Profiles (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). // // You can paginate the results using the MaxItems and Marker parameters. func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInput) (*ListInstanceProfilesForRoleOutput, error) { @@ -2787,6 +4635,23 @@ func (c *IAM) ListInstanceProfilesForRole(input *ListInstanceProfilesForRoleInpu return out, err } +// ListInstanceProfilesForRolePages iterates over the pages of a ListInstanceProfilesForRole operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListInstanceProfilesForRole method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListInstanceProfilesForRole operation. 
+// pageNum := 0 +// err := client.ListInstanceProfilesForRolePages(params, +// func(page *ListInstanceProfilesForRoleOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRoleInput, fn func(p *ListInstanceProfilesForRoleOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListInstanceProfilesForRoleRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2797,7 +4662,28 @@ func (c *IAM) ListInstanceProfilesForRolePages(input *ListInstanceProfilesForRol const opListMFADevices = "ListMFADevices" -// ListMFADevicesRequest generates a request for the ListMFADevices operation. +// ListMFADevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListMFADevices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMFADevices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMFADevicesRequest method. +// req, resp := client.ListMFADevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Request, output *ListMFADevicesOutput) { op := &request.Operation{ Name: opListMFADevices, @@ -2821,10 +4707,10 @@ func (c *IAM) ListMFADevicesRequest(input *ListMFADevicesInput) (req *request.Re return } -// Lists the MFA devices. If the request includes the user name, then this action -// lists all the MFA devices associated with the specified user name. If you -// do not specify a user name, IAM determines the user name implicitly based -// on the AWS access key ID signing the request. +// Lists the MFA devices for an IAM user. If the request includes a IAM user +// name, then this action lists all the MFA devices associated with the specified +// user. If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request for this API. // // You can paginate the results using the MaxItems and Marker parameters. func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, error) { @@ -2833,6 +4719,23 @@ func (c *IAM) ListMFADevices(input *ListMFADevicesInput) (*ListMFADevicesOutput, return out, err } +// ListMFADevicesPages iterates over the pages of a ListMFADevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMFADevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMFADevices operation. 
+// pageNum := 0 +// err := client.ListMFADevicesPages(params, +// func(page *ListMFADevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(p *ListMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListMFADevicesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2843,7 +4746,28 @@ func (c *IAM) ListMFADevicesPages(input *ListMFADevicesInput, fn func(p *ListMFA const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" -// ListOpenIDConnectProvidersRequest generates a request for the ListOpenIDConnectProviders operation. +// ListOpenIDConnectProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListOpenIDConnectProviders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListOpenIDConnectProviders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListOpenIDConnectProvidersRequest method. +// req, resp := client.ListOpenIDConnectProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) (req *request.Request, output *ListOpenIDConnectProvidersOutput) { op := &request.Operation{ Name: opListOpenIDConnectProviders, @@ -2861,7 +4785,8 @@ func (c *IAM) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvider return } -// Lists information about the OpenID Connect providers in the AWS account. +// Lists information about the IAM OpenID Connect (OIDC) provider resource objects +// defined in the AWS account. func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) (*ListOpenIDConnectProvidersOutput, error) { req, out := c.ListOpenIDConnectProvidersRequest(input) err := req.Send() @@ -2870,7 +4795,28 @@ func (c *IAM) ListOpenIDConnectProviders(input *ListOpenIDConnectProvidersInput) const opListPolicies = "ListPolicies" -// ListPoliciesRequest generates a request for the ListPolicies operation. +// ListPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPoliciesRequest method. 
+// req, resp := client.ListPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Request, output *ListPoliciesOutput) { op := &request.Operation{ Name: opListPolicies, @@ -2894,8 +4840,8 @@ func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Reques return } -// Lists all the managed policies that are available to your account, including -// your own customer managed policies and all AWS managed policies. +// Lists all the managed policies that are available in your AWS account, including +// your own customer-defined managed policies and all AWS managed policies. // // You can filter the list of policies that is returned using the optional // OnlyAttached, Scope, and PathPrefix parameters. For example, to list only @@ -2904,8 +4850,8 @@ func (c *IAM) ListPoliciesRequest(input *ListPoliciesInput) (req *request.Reques // // You can paginate the results using the MaxItems and Marker parameters. // -// For more information about managed policies, refer to Managed Policies and -// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error) { req, out := c.ListPoliciesRequest(input) @@ -2913,6 +4859,23 @@ func (c *IAM) ListPolicies(input *ListPoliciesInput) (*ListPoliciesOutput, error return out, err } +// ListPoliciesPages iterates over the pages of a ListPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicies operation. +// pageNum := 0 +// err := client.ListPoliciesPages(params, +// func(page *ListPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(p *ListPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2923,7 +4886,28 @@ func (c *IAM) ListPoliciesPages(input *ListPoliciesInput, fn func(p *ListPolicie const opListPolicyVersions = "ListPolicyVersions" -// ListPolicyVersionsRequest generates a request for the ListPolicyVersions operation. +// ListPolicyVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListPolicyVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPolicyVersions method directly +// instead. 
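As a rough sketch of the Request-method pattern these generated comments describe, applied to ListPolicies with the documented Scope and OnlyAttached filters (the session setup and the "example-tool" user-agent tag are illustrative assumptions), building a request, attaching a handler, and then sending it might look like:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        // Assumes credentials and region come from the environment.
        svc := iam.New(session.New())

        // Build the request without sending it, so custom logic can be injected first.
        req, out := svc.ListPoliciesRequest(&iam.ListPoliciesInput{
            Scope:        aws.String("Local"), // customer managed policies only
            OnlyAttached: aws.Bool(true),
        })
        // Same handler style the generated *Pages methods use; the tag is illustrative.
        req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("example-tool"))

        if err := req.Send(); err != nil {
            log.Fatal(err)
        }
        // After Send, the output struct returned alongside the request is filled.
        for _, p := range out.Policies {
            fmt.Println(aws.StringValue(p.PolicyName))
        }
    }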
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPolicyVersionsRequest method. +// req, resp := client.ListPolicyVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *request.Request, output *ListPolicyVersionsOutput) { op := &request.Operation{ Name: opListPolicyVersions, @@ -2948,10 +4932,10 @@ func (c *IAM) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) (req *re } // Lists information about the versions of the specified managed policy, including -// the version that is set as the policy's default version. +// the version that is currently set as the policy's default version. // -// For more information about managed policies, refer to Managed Policies and -// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// For more information about managed policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVersionsOutput, error) { req, out := c.ListPolicyVersionsRequest(input) @@ -2959,6 +4943,23 @@ func (c *IAM) ListPolicyVersions(input *ListPolicyVersionsInput) (*ListPolicyVer return out, err } +// ListPolicyVersionsPages iterates over the pages of a ListPolicyVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPolicyVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPolicyVersions operation. +// pageNum := 0 +// err := client.ListPolicyVersionsPages(params, +// func(page *ListPolicyVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListPolicyVersionsPages(input *ListPolicyVersionsInput, fn func(p *ListPolicyVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPolicyVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -2969,7 +4970,28 @@ func (c *IAM) ListPolicyVersionsPages(input *ListPolicyVersionsInput, fn func(p const opListRolePolicies = "ListRolePolicies" -// ListRolePoliciesRequest generates a request for the ListRolePolicies operation. +// ListRolePoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListRolePolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRolePolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRolePoliciesRequest method. 
+// req, resp := client.ListRolePoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *request.Request, output *ListRolePoliciesOutput) { op := &request.Operation{ Name: opListRolePolicies, @@ -2994,12 +5016,11 @@ func (c *IAM) ListRolePoliciesRequest(input *ListRolePoliciesInput) (req *reques } // Lists the names of the inline policies that are embedded in the specified -// role. +// IAM role. // -// A role can also have managed policies attached to it. To list the managed +// An IAM role can also have managed policies attached to it. To list the managed // policies that are attached to a role, use ListAttachedRolePolicies. For more -// information about policies, refer to Managed Policies and Inline Policies -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// information about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // You can paginate the results using the MaxItems and Marker parameters. If @@ -3011,6 +5032,23 @@ func (c *IAM) ListRolePolicies(input *ListRolePoliciesInput) (*ListRolePoliciesO return out, err } +// ListRolePoliciesPages iterates over the pages of a ListRolePolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRolePolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRolePolicies operation. +// pageNum := 0 +// err := client.ListRolePoliciesPages(params, +// func(page *ListRolePoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(p *ListRolePoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListRolePoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3021,7 +5059,28 @@ func (c *IAM) ListRolePoliciesPages(input *ListRolePoliciesInput, fn func(p *Lis const opListRoles = "ListRoles" -// ListRolesRequest generates a request for the ListRoles operation. +// ListRolesRequest generates a "aws/request.Request" representing the +// client's request for the ListRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRolesRequest method. 
+// req, resp := client.ListRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, output *ListRolesOutput) { op := &request.Operation{ Name: opListRoles, @@ -3045,9 +5104,9 @@ func (c *IAM) ListRolesRequest(input *ListRolesInput) (req *request.Request, out return } -// Lists the roles that have the specified path prefix. If there are none, the -// action returns an empty list. For more information about roles, go to Working -// with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// Lists the IAM roles that have the specified path prefix. If there are none, +// the action returns an empty list. For more information about roles, go to +// Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). // // You can paginate the results using the MaxItems and Marker parameters. func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { @@ -3056,6 +5115,23 @@ func (c *IAM) ListRoles(input *ListRolesInput) (*ListRolesOutput, error) { return out, err } +// ListRolesPages iterates over the pages of a ListRoles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListRoles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListRoles operation. +// pageNum := 0 +// err := client.ListRolesPages(params, +// func(page *ListRolesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(p *ListRolesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListRolesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3066,7 +5142,28 @@ func (c *IAM) ListRolesPages(input *ListRolesInput, fn func(p *ListRolesOutput, const opListSAMLProviders = "ListSAMLProviders" -// ListSAMLProvidersRequest generates a request for the ListSAMLProviders operation. +// ListSAMLProvidersRequest generates a "aws/request.Request" representing the +// client's request for the ListSAMLProviders operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSAMLProviders method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSAMLProvidersRequest method. 
+// req, resp := client.ListSAMLProvidersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *request.Request, output *ListSAMLProvidersOutput) { op := &request.Operation{ Name: opListSAMLProviders, @@ -3084,9 +5181,9 @@ func (c *IAM) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) (req *requ return } -// Lists the SAML providers in the account. +// Lists the SAML provider resource objects defined in IAM in the account. // -// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvidersOutput, error) { req, out := c.ListSAMLProvidersRequest(input) err := req.Send() @@ -3095,12 +5192,39 @@ func (c *IAM) ListSAMLProviders(input *ListSAMLProvidersInput) (*ListSAMLProvide const opListSSHPublicKeys = "ListSSHPublicKeys" -// ListSSHPublicKeysRequest generates a request for the ListSSHPublicKeys operation. +// ListSSHPublicKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListSSHPublicKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSSHPublicKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSSHPublicKeysRequest method. +// req, resp := client.ListSSHPublicKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) (req *request.Request, output *ListSSHPublicKeysOutput) { op := &request.Operation{ Name: opListSSHPublicKeys, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, } if input == nil { @@ -3130,9 +5254,55 @@ func (c *IAM) ListSSHPublicKeys(input *ListSSHPublicKeysInput) (*ListSSHPublicKe return out, err } +// ListSSHPublicKeysPages iterates over the pages of a ListSSHPublicKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSSHPublicKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSSHPublicKeys operation. 
+// pageNum := 0 +// err := client.ListSSHPublicKeysPages(params, +// func(page *ListSSHPublicKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) ListSSHPublicKeysPages(input *ListSSHPublicKeysInput, fn func(p *ListSSHPublicKeysOutput, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListSSHPublicKeysRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListSSHPublicKeysOutput), lastPage) + }) +} + const opListServerCertificates = "ListServerCertificates" -// ListServerCertificatesRequest generates a request for the ListServerCertificates operation. +// ListServerCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListServerCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListServerCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListServerCertificatesRequest method. +// req, resp := client.ListServerCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) (req *request.Request, output *ListServerCertificatesOutput) { op := &request.Operation{ Name: opListServerCertificates, @@ -3156,8 +5326,8 @@ func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput) return } -// Lists the server certificates that have the specified path prefix. If none -// exist, the action returns an empty list. +// Lists the server certificates stored in IAM that have the specified path +// prefix. If none exist, the action returns an empty list. // // You can paginate the results using the MaxItems and Marker parameters. // @@ -3171,6 +5341,23 @@ func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListS return out, err } +// ListServerCertificatesPages iterates over the pages of a ListServerCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListServerCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListServerCertificates operation. 
+// pageNum := 0 +// err := client.ListServerCertificatesPages(params, +// func(page *ListServerCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn func(p *ListServerCertificatesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListServerCertificatesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3181,7 +5368,28 @@ func (c *IAM) ListServerCertificatesPages(input *ListServerCertificatesInput, fn const opListSigningCertificates = "ListSigningCertificates" -// ListSigningCertificatesRequest generates a request for the ListSigningCertificates operation. +// ListSigningCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the ListSigningCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSigningCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSigningCertificatesRequest method. +// req, resp := client.ListSigningCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) (req *request.Request, output *ListSigningCertificatesOutput) { op := &request.Operation{ Name: opListSigningCertificates, @@ -3206,21 +5414,39 @@ func (c *IAM) ListSigningCertificatesRequest(input *ListSigningCertificatesInput } // Returns information about the signing certificates associated with the specified -// user. If there are none, the action returns an empty list. +// IAM user. If there are none, the action returns an empty list. // // Although each user is limited to a small number of signing certificates, // you can still paginate the results using the MaxItems and Marker parameters. // // If the UserName field is not specified, the user name is determined implicitly -// based on the AWS access key ID used to sign the request. Because this action -// works for access keys under the AWS account, you can use this action to manage -// root credentials even if the AWS account has no associated users. +// based on the AWS access key ID used to sign the request for this API. Because +// this action works for access keys under the AWS account, you can use this +// action to manage root credentials even if the AWS account has no associated +// users. func (c *IAM) ListSigningCertificates(input *ListSigningCertificatesInput) (*ListSigningCertificatesOutput, error) { req, out := c.ListSigningCertificatesRequest(input) err := req.Send() return out, err } +// ListSigningCertificatesPages iterates over the pages of a ListSigningCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSigningCertificates method for more information on how to use this operation. 
+// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSigningCertificates operation. +// pageNum := 0 +// err := client.ListSigningCertificatesPages(params, +// func(page *ListSigningCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, fn func(p *ListSigningCertificatesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListSigningCertificatesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3231,7 +5457,28 @@ func (c *IAM) ListSigningCertificatesPages(input *ListSigningCertificatesInput, const opListUserPolicies = "ListUserPolicies" -// ListUserPoliciesRequest generates a request for the ListUserPolicies operation. +// ListUserPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListUserPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUserPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUserPoliciesRequest method. +// req, resp := client.ListUserPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *request.Request, output *ListUserPoliciesOutput) { op := &request.Operation{ Name: opListUserPolicies, @@ -3255,12 +5502,11 @@ func (c *IAM) ListUserPoliciesRequest(input *ListUserPoliciesInput) (req *reques return } -// Lists the names of the inline policies embedded in the specified user. +// Lists the names of the inline policies embedded in the specified IAM user. // -// A user can also have managed policies attached to it. To list the managed +// An IAM user can also have managed policies attached to it. To list the managed // policies that are attached to a user, use ListAttachedUserPolicies. For more -// information about policies, refer to Managed Policies and Inline Policies -// (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// information about policies, see Managed Policies and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // You can paginate the results using the MaxItems and Marker parameters. If @@ -3272,6 +5518,23 @@ func (c *IAM) ListUserPolicies(input *ListUserPoliciesInput) (*ListUserPoliciesO return out, err } +// ListUserPoliciesPages iterates over the pages of a ListUserPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUserPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListUserPolicies operation. +// pageNum := 0 +// err := client.ListUserPoliciesPages(params, +// func(page *ListUserPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(p *ListUserPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListUserPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3282,7 +5545,28 @@ func (c *IAM) ListUserPoliciesPages(input *ListUserPoliciesInput, fn func(p *Lis const opListUsers = "ListUsers" -// ListUsersRequest generates a request for the ListUsers operation. +// ListUsersRequest generates a "aws/request.Request" representing the +// client's request for the ListUsers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListUsers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListUsersRequest method. +// req, resp := client.ListUsersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListUsersRequest(input *ListUsersInput) (req *request.Request, output *ListUsersOutput) { op := &request.Operation{ Name: opListUsers, @@ -3317,6 +5601,23 @@ func (c *IAM) ListUsers(input *ListUsersInput) (*ListUsersOutput, error) { return out, err } +// ListUsersPages iterates over the pages of a ListUsers operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListUsers method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListUsers operation. +// pageNum := 0 +// err := client.ListUsersPages(params, +// func(page *ListUsersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(p *ListUsersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListUsersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3327,7 +5628,28 @@ func (c *IAM) ListUsersPages(input *ListUsersInput, fn func(p *ListUsersOutput, const opListVirtualMFADevices = "ListVirtualMFADevices" -// ListVirtualMFADevicesRequest generates a request for the ListVirtualMFADevices operation. +// ListVirtualMFADevicesRequest generates a "aws/request.Request" representing the +// client's request for the ListVirtualMFADevices operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVirtualMFADevices method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVirtualMFADevicesRequest method. +// req, resp := client.ListVirtualMFADevicesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (req *request.Request, output *ListVirtualMFADevicesOutput) { op := &request.Operation{ Name: opListVirtualMFADevices, @@ -3351,7 +5673,7 @@ func (c *IAM) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) (r return } -// Lists the virtual MFA devices under the AWS account by assignment status. +// Lists the virtual MFA devices defined in the AWS account by assignment status. // If you do not specify an assignment status, the action returns a list of // all virtual MFA devices. Assignment status can be Assigned, Unassigned, or // Any. @@ -3363,6 +5685,23 @@ func (c *IAM) ListVirtualMFADevices(input *ListVirtualMFADevicesInput) (*ListVir return out, err } +// ListVirtualMFADevicesPages iterates over the pages of a ListVirtualMFADevices operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListVirtualMFADevices method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListVirtualMFADevices operation. +// pageNum := 0 +// err := client.ListVirtualMFADevicesPages(params, +// func(page *ListVirtualMFADevicesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn func(p *ListVirtualMFADevicesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListVirtualMFADevicesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -3373,7 +5712,28 @@ func (c *IAM) ListVirtualMFADevicesPages(input *ListVirtualMFADevicesInput, fn f const opPutGroupPolicy = "PutGroupPolicy" -// PutGroupPolicyRequest generates a request for the PutGroupPolicy operation. +// PutGroupPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutGroupPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutGroupPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutGroupPolicyRequest method. 
+// req, resp := client.PutGroupPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Request, output *PutGroupPolicyOutput) { op := &request.Operation{ Name: opPutGroupPolicy, @@ -3393,23 +5753,23 @@ func (c *IAM) PutGroupPolicyRequest(input *PutGroupPolicyInput) (req *request.Re return } -// Adds (or updates) an inline policy document that is embedded in the specified -// group. +// Adds or updates an inline policy document that is embedded in the specified +// IAM group. // // A user can also have managed policies attached to it. To attach a managed // policy to a group, use AttachGroupPolicy. To create a new managed policy, -// use CreatePolicy. For information about policies, refer to Managed Policies -// and Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// use CreatePolicy. For information about policies, see Managed Policies and +// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // For information about limits on the number of inline policies that you can // embed in a group, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) // in the IAM User Guide. // -// Because policy documents can be large, you should use POST rather than GET -// when calling PutGroupPolicy. For general information about using the Query -// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in the Using IAM guide. +// Because policy documents can be large, you should use POST rather than +// GET when calling PutGroupPolicy. For general information about using the +// Query API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, error) { req, out := c.PutGroupPolicyRequest(input) err := req.Send() @@ -3418,7 +5778,28 @@ func (c *IAM) PutGroupPolicy(input *PutGroupPolicyInput) (*PutGroupPolicyOutput, const opPutRolePolicy = "PutRolePolicy" -// PutRolePolicyRequest generates a request for the PutRolePolicy operation. +// PutRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRolePolicyRequest method. 
+// req, resp := client.PutRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Request, output *PutRolePolicyOutput) { op := &request.Operation{ Name: opPutRolePolicy, @@ -3438,29 +5819,30 @@ func (c *IAM) PutRolePolicyRequest(input *PutRolePolicyInput) (req *request.Requ return } -// Adds (or updates) an inline policy document that is embedded in the specified -// role. +// Adds or updates an inline policy document that is embedded in the specified +// IAM role. // // When you embed an inline policy in a role, the inline policy is used as -// the role's access (permissions) policy. The role's trust policy is created -// at the same time as the role, using CreateRole. You can update a role's trust -// policy using UpdateAssumeRolePolicy. For more information about roles, go -// to Using Roles to Delegate Permissions and Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// part of the role's access (permissions) policy. The role's trust policy is +// created at the same time as the role, using CreateRole. You can update a +// role's trust policy using UpdateAssumeRolePolicy. For more information about +// IAM roles, go to Using Roles to Delegate Permissions and Federate Identities +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). // // A role can also have a managed policy attached to it. To attach a managed // policy to a role, use AttachRolePolicy. To create a new managed policy, use -// CreatePolicy. For information about policies, refer to Managed Policies and -// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // For information about limits on the number of inline policies that you can // embed with a role, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) // in the IAM User Guide. // -// Because policy documents can be large, you should use POST rather than GET -// when calling PutRolePolicy. For general information about using the Query +// Because policy documents can be large, you should use POST rather than +// GET when calling PutRolePolicy. For general information about using the Query // API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in the Using IAM guide. +// in the IAM User Guide. func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, error) { req, out := c.PutRolePolicyRequest(input) err := req.Send() @@ -3469,7 +5851,28 @@ func (c *IAM) PutRolePolicy(input *PutRolePolicyInput) (*PutRolePolicyOutput, er const opPutUserPolicy = "PutUserPolicy" -// PutUserPolicyRequest generates a request for the PutUserPolicy operation. +// PutUserPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutUserPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutUserPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutUserPolicyRequest method. +// req, resp := client.PutUserPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Request, output *PutUserPolicyOutput) { op := &request.Operation{ Name: opPutUserPolicy, @@ -3489,23 +5892,23 @@ func (c *IAM) PutUserPolicyRequest(input *PutUserPolicyInput) (req *request.Requ return } -// Adds (or updates) an inline policy document that is embedded in the specified -// user. +// Adds or updates an inline policy document that is embedded in the specified +// IAM user. // -// A user can also have a managed policy attached to it. To attach a managed +// An IAM user can also have a managed policy attached to it. To attach a managed // policy to a user, use AttachUserPolicy. To create a new managed policy, use -// CreatePolicy. For information about policies, refer to Managed Policies and -// Inline Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. // // For information about limits on the number of inline policies that you can // embed in a user, see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) // in the IAM User Guide. // -// Because policy documents can be large, you should use POST rather than GET -// when calling PutUserPolicy. For general information about using the Query +// Because policy documents can be large, you should use POST rather than +// GET when calling PutUserPolicy. For general information about using the Query // API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in the Using IAM guide. +// in the IAM User Guide. func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, error) { req, out := c.PutUserPolicyRequest(input) err := req.Send() @@ -3514,7 +5917,28 @@ func (c *IAM) PutUserPolicy(input *PutUserPolicyInput) (*PutUserPolicyOutput, er const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider" -// RemoveClientIDFromOpenIDConnectProviderRequest generates a request for the RemoveClientIDFromOpenIDConnectProvider operation. +// RemoveClientIDFromOpenIDConnectProviderRequest generates a "aws/request.Request" representing the +// client's request for the RemoveClientIDFromOpenIDConnectProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
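As a rough sketch of calling PutUserPolicy as described above (the session setup, user name, policy name, and policy document are illustrative assumptions), embedding a small inline policy might look like:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/iam"
    )

    func main() {
        // Assumes credentials and region come from the environment.
        svc := iam.New(session.New())

        // Illustrative inline policy document; real policies will differ.
        doc := `{
          "Version": "2012-10-17",
          "Statement": [
            {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}
          ]
        }`

        _, err := svc.PutUserPolicy(&iam.PutUserPolicyInput{
            UserName:       aws.String("example-user"),   // illustrative value
            PolicyName:     aws.String("example-inline"), // illustrative value
            PolicyDocument: aws.String(doc),
        })
        if err != nil {
            log.Fatal(err)
        }
    }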
If +// you just want the service response, call the RemoveClientIDFromOpenIDConnectProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveClientIDFromOpenIDConnectProviderRequest method. +// req, resp := client.RemoveClientIDFromOpenIDConnectProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) (req *request.Request, output *RemoveClientIDFromOpenIDConnectProviderOutput) { op := &request.Operation{ Name: opRemoveClientIDFromOpenIDConnectProvider, @@ -3535,10 +5959,11 @@ func (c *IAM) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClient } // Removes the specified client ID (also known as audience) from the list of -// client IDs registered for the specified IAM OpenID Connect provider. +// client IDs registered for the specified IAM OpenID Connect (OIDC) provider +// resource object. // // This action is idempotent; it does not fail or return an error if you try -// to remove a client ID that was removed previously. +// to remove a client ID that does not exist. func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromOpenIDConnectProviderInput) (*RemoveClientIDFromOpenIDConnectProviderOutput, error) { req, out := c.RemoveClientIDFromOpenIDConnectProviderRequest(input) err := req.Send() @@ -3547,7 +5972,28 @@ func (c *IAM) RemoveClientIDFromOpenIDConnectProvider(input *RemoveClientIDFromO const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" -// RemoveRoleFromInstanceProfileRequest generates a request for the RemoveRoleFromInstanceProfile operation. +// RemoveRoleFromInstanceProfileRequest generates a "aws/request.Request" representing the +// client's request for the RemoveRoleFromInstanceProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveRoleFromInstanceProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveRoleFromInstanceProfileRequest method. +// req, resp := client.RemoveRoleFromInstanceProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) (req *request.Request, output *RemoveRoleFromInstanceProfileOutput) { op := &request.Operation{ Name: opRemoveRoleFromInstanceProfile, @@ -3567,13 +6013,14 @@ func (c *IAM) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstance return } -// Removes the specified role from the specified instance profile. +// Removes the specified IAM role from the specified EC2 instance profile. // // Make sure you do not have any Amazon EC2 instances running with the role // you are about to remove from the instance profile. 
Removing a role from an -// instance profile that is associated with a running instance will break any -// applications running on the instance. For more information about roles, -// go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// instance profile that is associated with a running instance break any applications +// running on the instance. +// +// For more information about IAM roles, go to Working with Roles (http://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). // For more information about instance profiles, go to About Instance Profiles // (http://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfileInput) (*RemoveRoleFromInstanceProfileOutput, error) { @@ -3584,7 +6031,28 @@ func (c *IAM) RemoveRoleFromInstanceProfile(input *RemoveRoleFromInstanceProfile const opRemoveUserFromGroup = "RemoveUserFromGroup" -// RemoveUserFromGroupRequest generates a request for the RemoveUserFromGroup operation. +// RemoveUserFromGroupRequest generates a "aws/request.Request" representing the +// client's request for the RemoveUserFromGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveUserFromGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveUserFromGroupRequest method. +// req, resp := client.RemoveUserFromGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) (req *request.Request, output *RemoveUserFromGroupOutput) { op := &request.Operation{ Name: opRemoveUserFromGroup, @@ -3613,7 +6081,28 @@ func (c *IAM) RemoveUserFromGroup(input *RemoveUserFromGroupInput) (*RemoveUserF const opResyncMFADevice = "ResyncMFADevice" -// ResyncMFADeviceRequest generates a request for the ResyncMFADevice operation. +// ResyncMFADeviceRequest generates a "aws/request.Request" representing the +// client's request for the ResyncMFADevice operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResyncMFADevice method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResyncMFADeviceRequest method. 
+// req, resp := client.ResyncMFADeviceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request.Request, output *ResyncMFADeviceOutput) { op := &request.Operation{ Name: opResyncMFADevice, @@ -3633,11 +6122,12 @@ func (c *IAM) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) (req *request. return } -// Synchronizes the specified MFA device with AWS servers. +// Synchronizes the specified MFA device with its IAM resource object on the +// AWS servers. // // For more information about creating and working with virtual MFA devices, // go to Using a Virtual MFA Device (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) -// in the Using IAM guide. +// in the IAM User Guide. func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutput, error) { req, out := c.ResyncMFADeviceRequest(input) err := req.Send() @@ -3646,7 +6136,28 @@ func (c *IAM) ResyncMFADevice(input *ResyncMFADeviceInput) (*ResyncMFADeviceOutp const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" -// SetDefaultPolicyVersionRequest generates a request for the SetDefaultPolicyVersion operation. +// SetDefaultPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the SetDefaultPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetDefaultPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetDefaultPolicyVersionRequest method. +// req, resp := client.SetDefaultPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) (req *request.Request, output *SetDefaultPolicyVersionOutput) { op := &request.Operation{ Name: opSetDefaultPolicyVersion, @@ -3673,7 +6184,7 @@ func (c *IAM) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput // to. To list the users, groups, and roles that the policy is attached to, // use the ListEntitiesForPolicy API. // -// For information about managed policies, refer to Managed Policies and Inline +// For information about managed policies, see Managed Policies and Inline // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) // in the IAM User Guide. func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*SetDefaultPolicyVersionOutput, error) { @@ -3684,12 +6195,39 @@ func (c *IAM) SetDefaultPolicyVersion(input *SetDefaultPolicyVersionInput) (*Set const opSimulateCustomPolicy = "SimulateCustomPolicy" -// SimulateCustomPolicyRequest generates a request for the SimulateCustomPolicy operation. +// SimulateCustomPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SimulateCustomPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SimulateCustomPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SimulateCustomPolicyRequest method. +// req, resp := client.SimulateCustomPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { op := &request.Operation{ Name: opSimulateCustomPolicy, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, } if input == nil { @@ -3725,14 +6263,66 @@ func (c *IAM) SimulateCustomPolicy(input *SimulateCustomPolicyInput) (*SimulateP return out, err } +// SimulateCustomPolicyPages iterates over the pages of a SimulateCustomPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SimulateCustomPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SimulateCustomPolicy operation. +// pageNum := 0 +// err := client.SimulateCustomPolicyPages(params, +// func(page *SimulatePolicyResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) SimulateCustomPolicyPages(input *SimulateCustomPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error { + page, _ := c.SimulateCustomPolicyRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*SimulatePolicyResponse), lastPage) + }) +} + const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy" -// SimulatePrincipalPolicyRequest generates a request for the SimulatePrincipalPolicy operation. +// SimulatePrincipalPolicyRequest generates a "aws/request.Request" representing the +// client's request for the SimulatePrincipalPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SimulatePrincipalPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SimulatePrincipalPolicyRequest method. 
+// req, resp := client.SimulatePrincipalPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) (req *request.Request, output *SimulatePolicyResponse) { op := &request.Operation{ Name: opSimulatePrincipalPolicy, HTTPMethod: "POST", HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, } if input == nil { @@ -3761,7 +6351,7 @@ func (c *IAM) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput // The simulation does not perform the API actions, it only checks the authorization // to determine if the simulated policies allow or deny the actions. // -// Note: This API discloses information about the permissions granted to other +// Note: This API discloses information about the permissions granted to other // users. If you do not want users to see other user's permissions, then consider // allowing them to use SimulateCustomPolicy instead. // @@ -3778,9 +6368,55 @@ func (c *IAM) SimulatePrincipalPolicy(input *SimulatePrincipalPolicyInput) (*Sim return out, err } +// SimulatePrincipalPolicyPages iterates over the pages of a SimulatePrincipalPolicy operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See SimulatePrincipalPolicy method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a SimulatePrincipalPolicy operation. +// pageNum := 0 +// err := client.SimulatePrincipalPolicyPages(params, +// func(page *SimulatePolicyResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *IAM) SimulatePrincipalPolicyPages(input *SimulatePrincipalPolicyInput, fn func(p *SimulatePolicyResponse, lastPage bool) (shouldContinue bool)) error { + page, _ := c.SimulatePrincipalPolicyRequest(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*SimulatePolicyResponse), lastPage) + }) +} + const opUpdateAccessKey = "UpdateAccessKey" -// UpdateAccessKeyRequest generates a request for the UpdateAccessKey operation. +// UpdateAccessKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccessKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAccessKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAccessKeyRequest method. 
+// req, resp := client.UpdateAccessKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) (req *request.Request, output *UpdateAccessKeyOutput) { op := &request.Operation{ Name: opUpdateAccessKey, @@ -3820,7 +6456,28 @@ func (c *IAM) UpdateAccessKey(input *UpdateAccessKeyInput) (*UpdateAccessKeyOutp const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" -// UpdateAccountPasswordPolicyRequest generates a request for the UpdateAccountPasswordPolicy operation. +// UpdateAccountPasswordPolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAccountPasswordPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAccountPasswordPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAccountPasswordPolicyRequest method. +// req, resp := client.UpdateAccountPasswordPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) (req *request.Request, output *UpdateAccountPasswordPolicyOutput) { op := &request.Operation{ Name: opUpdateAccountPasswordPolicy, @@ -3858,7 +6515,28 @@ func (c *IAM) UpdateAccountPasswordPolicy(input *UpdateAccountPasswordPolicyInpu const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" -// UpdateAssumeRolePolicyRequest generates a request for the UpdateAssumeRolePolicy operation. +// UpdateAssumeRolePolicyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAssumeRolePolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAssumeRolePolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAssumeRolePolicyRequest method. +// req, resp := client.UpdateAssumeRolePolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) (req *request.Request, output *UpdateAssumeRolePolicyOutput) { op := &request.Operation{ Name: opUpdateAssumeRolePolicy, @@ -3878,9 +6556,10 @@ func (c *IAM) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) return } -// Updates the policy that grants an entity permission to assume a role. 
For -// more information about roles, go to Using Roles to Delegate Permissions and -// Federate Identities (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// Updates the policy that grants an IAM entity permission to assume a role. +// This is typically referred to as the "role trust policy". For more information +// about roles, go to Using Roles to Delegate Permissions and Federate Identities +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*UpdateAssumeRolePolicyOutput, error) { req, out := c.UpdateAssumeRolePolicyRequest(input) err := req.Send() @@ -3889,7 +6568,28 @@ func (c *IAM) UpdateAssumeRolePolicy(input *UpdateAssumeRolePolicyInput) (*Updat const opUpdateGroup = "UpdateGroup" -// UpdateGroupRequest generates a request for the UpdateGroup operation. +// UpdateGroupRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateGroupRequest method. +// req, resp := client.UpdateGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, output *UpdateGroupOutput) { op := &request.Operation{ Name: opUpdateGroup, @@ -3909,16 +6609,17 @@ func (c *IAM) UpdateGroupRequest(input *UpdateGroupInput) (req *request.Request, return } -// Updates the name and/or the path of the specified group. +// Updates the name and/or the path of the specified IAM group. // -// You should understand the implications of changing a group's path or name. +// You should understand the implications of changing a group's path or name. // For more information, see Renaming Users and Groups (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html) -// in the IAM User Guide. To change a group name the requester must have appropriate -// permissions on both the source object and the target object. For example, -// to change Managers to MGRs, the entity making the request must have permission -// on Managers and MGRs, or must have permission on all (*). For more information -// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html" -// target="blank). +// in the IAM User Guide. +// +// To change an IAM group name the requester must have appropriate permissions +// on both the source object and the target object. For example, to change "Managers" +// to "MGRs", the entity making the request must have permission on both "Managers" +// and "MGRs", or must have permission on all (*). For more information about +// permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). 
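+//
+// As an illustrative aside (not part of the generated documentation), the "Managers"
+// to "MGRs" rename described above might look like the following sketch, assuming
+// svc is an existing *IAM client:
+//
+//    _, err := svc.UpdateGroup(&UpdateGroupInput{
+//        GroupName:    aws.String("Managers"),
+//        NewGroupName: aws.String("MGRs"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }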
func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { req, out := c.UpdateGroupRequest(input) err := req.Send() @@ -3927,7 +6628,28 @@ func (c *IAM) UpdateGroup(input *UpdateGroupInput) (*UpdateGroupOutput, error) { const opUpdateLoginProfile = "UpdateLoginProfile" -// UpdateLoginProfileRequest generates a request for the UpdateLoginProfile operation. +// UpdateLoginProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLoginProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateLoginProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateLoginProfileRequest method. +// req, resp := client.UpdateLoginProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *request.Request, output *UpdateLoginProfileOutput) { op := &request.Operation{ Name: opUpdateLoginProfile, @@ -3947,10 +6669,10 @@ func (c *IAM) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) (req *re return } -// Changes the password for the specified user. +// Changes the password for the specified IAM user. // -// Users can change their own passwords by calling ChangePassword. For more -// information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// IAM users can change their own passwords by calling ChangePassword. For +// more information about modifying passwords, see Managing Passwords (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) // in the IAM User Guide. func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginProfileOutput, error) { req, out := c.UpdateLoginProfileRequest(input) @@ -3960,7 +6682,28 @@ func (c *IAM) UpdateLoginProfile(input *UpdateLoginProfileInput) (*UpdateLoginPr const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint" -// UpdateOpenIDConnectProviderThumbprintRequest generates a request for the UpdateOpenIDConnectProviderThumbprint operation. +// UpdateOpenIDConnectProviderThumbprintRequest generates a "aws/request.Request" representing the +// client's request for the UpdateOpenIDConnectProviderThumbprint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateOpenIDConnectProviderThumbprint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the UpdateOpenIDConnectProviderThumbprintRequest method. +// req, resp := client.UpdateOpenIDConnectProviderThumbprintRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) (req *request.Request, output *UpdateOpenIDConnectProviderThumbprintOutput) { op := &request.Operation{ Name: opUpdateOpenIDConnectProviderThumbprint, @@ -3980,7 +6723,8 @@ func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDCo return } -// Replaces the existing list of server certificate thumbprints with a new list. +// Replaces the existing list of server certificate thumbprints associated with +// an OpenID Connect (OIDC) provider resource object with a new list of thumbprints. // // The list that you pass with this action completely replaces the existing // list of thumbprints. (The lists are not merged.) @@ -3988,12 +6732,12 @@ func (c *IAM) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDCo // Typically, you need to update a thumbprint only when the identity provider's // certificate changes, which occurs rarely. However, if the provider's certificate // does change, any attempt to assume an IAM role that specifies the OIDC provider -// as a principal will fail until the certificate thumbprint is updated. +// as a principal fails until the certificate thumbprint is updated. // -// Because trust for the OpenID Connect provider is ultimately derived from -// the provider's certificate and is validated by the thumbprint, it is a best -// practice to limit access to the UpdateOpenIDConnectProviderThumbprint action -// to highly-privileged users. +// Because trust for the OIDC provider is ultimately derived from the provider's +// certificate and is validated by the thumbprint, it is a best practice to +// limit access to the UpdateOpenIDConnectProviderThumbprint action to highly-privileged +// users. func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectProviderThumbprintInput) (*UpdateOpenIDConnectProviderThumbprintOutput, error) { req, out := c.UpdateOpenIDConnectProviderThumbprintRequest(input) err := req.Send() @@ -4002,7 +6746,28 @@ func (c *IAM) UpdateOpenIDConnectProviderThumbprint(input *UpdateOpenIDConnectPr const opUpdateSAMLProvider = "UpdateSAMLProvider" -// UpdateSAMLProviderRequest generates a request for the UpdateSAMLProvider operation. +// UpdateSAMLProviderRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSAMLProvider operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSAMLProvider method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSAMLProviderRequest method. 
+// req, resp := client.UpdateSAMLProviderRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *request.Request, output *UpdateSAMLProviderOutput) { op := &request.Operation{ Name: opUpdateSAMLProvider, @@ -4020,9 +6785,9 @@ func (c *IAM) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) (req *re return } -// Updates the metadata document for an existing SAML provider. +// Updates the metadata document for an existing SAML provider resource object. // -// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// This operation requires Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLProviderOutput, error) { req, out := c.UpdateSAMLProviderRequest(input) err := req.Send() @@ -4031,7 +6796,28 @@ func (c *IAM) UpdateSAMLProvider(input *UpdateSAMLProviderInput) (*UpdateSAMLPro const opUpdateSSHPublicKey = "UpdateSSHPublicKey" -// UpdateSSHPublicKeyRequest generates a request for the UpdateSSHPublicKey operation. +// UpdateSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSSHPublicKeyRequest method. +// req, resp := client.UpdateSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *request.Request, output *UpdateSSHPublicKeyOutput) { op := &request.Operation{ Name: opUpdateSSHPublicKey, @@ -4051,7 +6837,7 @@ func (c *IAM) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) (req *re return } -// Sets the status of the specified SSH public key to active or inactive. SSH +// Sets the status of an IAM user's SSH public key to active or inactive. SSH // public keys that are inactive cannot be used for authentication. This action // can be used to disable a user's SSH public key as part of a key rotation // work flow. @@ -4069,7 +6855,28 @@ func (c *IAM) UpdateSSHPublicKey(input *UpdateSSHPublicKeyInput) (*UpdateSSHPubl const opUpdateServerCertificate = "UpdateServerCertificate" -// UpdateServerCertificateRequest generates a request for the UpdateServerCertificate operation. +// UpdateServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
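+//
+// As an illustrative aside (not part of the generated documentation), renaming a
+// server certificate with the plain UpdateServerCertificate method might look like
+// the following sketch, assuming svc is an existing *IAM client:
+//
+//    _, err := svc.UpdateServerCertificate(&UpdateServerCertificateInput{
+//        ServerCertificateName:    aws.String("ProductionCert"),
+//        NewServerCertificateName: aws.String("ProdCert"),
+//    })
+//    if err != nil {
+//        fmt.Println(err)
+//    }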
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateServerCertificateRequest method. +// req, resp := client.UpdateServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) (req *request.Request, output *UpdateServerCertificateOutput) { op := &request.Operation{ Name: opUpdateServerCertificate, @@ -4089,21 +6896,24 @@ func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput return } -// Updates the name and/or the path of the specified server certificate. +// Updates the name and/or the path of the specified server certificate stored +// in IAM. // // For more information about working with server certificates, including a // list of AWS services that can use the server certificates that you manage // with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) // in the IAM User Guide. // -// You should understand the implications of changing a server certificate's +// You should understand the implications of changing a server certificate's // path or name. For more information, see Renaming a Server Certificate (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts) -// in the IAM User Guide. To change a server certificate name the requester -// must have appropriate permissions on both the source object and the target -// object. For example, to change the name from ProductionCert to ProdCert, -// the entity making the request must have permission on ProductionCert and -// ProdCert, or must have permission on all (*). For more information about -// permissions, see Access Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) +// in the IAM User Guide. +// +// To change a server certificate name the requester must have appropriate +// permissions on both the source object and the target object. For example, +// to change the name from "ProductionCert" to "ProdCert", the entity making +// the request must have permission on "ProductionCert" and "ProdCert", or must +// have permission on all (*). For more information about permissions, see Access +// Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html) // in the IAM User Guide. func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) { req, out := c.UpdateServerCertificateRequest(input) @@ -4113,7 +6923,28 @@ func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*Upd const opUpdateSigningCertificate = "UpdateSigningCertificate" -// UpdateSigningCertificateRequest generates a request for the UpdateSigningCertificate operation. +// UpdateSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UpdateSigningCertificate operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateSigningCertificateRequest method. +// req, resp := client.UpdateSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) (req *request.Request, output *UpdateSigningCertificateOutput) { op := &request.Operation{ Name: opUpdateSigningCertificate, @@ -4133,9 +6964,9 @@ func (c *IAM) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInp return } -// Changes the status of the specified signing certificate from active to disabled, -// or vice versa. This action can be used to disable a user's signing certificate -// as part of a certificate rotation work flow. +// Changes the status of the specified user signing certificate from active +// to disabled, or vice versa. This action can be used to disable an IAM user's +// signing certificate as part of a certificate rotation work flow. // // If the UserName field is not specified, the UserName is determined implicitly // based on the AWS access key ID used to sign the request. Because this action @@ -4149,7 +6980,28 @@ func (c *IAM) UpdateSigningCertificate(input *UpdateSigningCertificateInput) (*U const opUpdateUser = "UpdateUser" -// UpdateUserRequest generates a request for the UpdateUser operation. +// UpdateUserRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUser operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUser method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserRequest method. +// req, resp := client.UpdateUserRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, output *UpdateUserOutput) { op := &request.Operation{ Name: opUpdateUser, @@ -4169,17 +7021,18 @@ func (c *IAM) UpdateUserRequest(input *UpdateUserInput) (req *request.Request, o return } -// Updates the name and/or the path of the specified user. +// Updates the name and/or the path of the specified IAM user. // -// You should understand the implications of changing a user's path or name. 
-// For more information, see Renaming an IAM User (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming) +// You should understand the implications of changing an IAM user's path +// or name. For more information, see Renaming an IAM User (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming) // and Renaming an IAM Group (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups_manage_rename.html) -// in the IAM User Guide. To change a user name the requester must have appropriate -// permissions on both the source object and the target object. For example, -// to change Bob to Robert, the entity making the request must have permission -// on Bob and Robert, or must have permission on all (*). For more information -// about permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html" -// target="blank). +// in the IAM User Guide. +// +// To change a user name the requester must have appropriate permissions +// on both the source object and the target object. For example, to change Bob +// to Robert, the entity making the request must have permission on Bob and +// Robert, or must have permission on all (*). For more information about permissions, +// see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { req, out := c.UpdateUserRequest(input) err := req.Send() @@ -4188,7 +7041,28 @@ func (c *IAM) UpdateUser(input *UpdateUserInput) (*UpdateUserOutput, error) { const opUploadSSHPublicKey = "UploadSSHPublicKey" -// UploadSSHPublicKeyRequest generates a request for the UploadSSHPublicKey operation. +// UploadSSHPublicKeyRequest generates a "aws/request.Request" representing the +// client's request for the UploadSSHPublicKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadSSHPublicKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadSSHPublicKeyRequest method. +// req, resp := client.UploadSSHPublicKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) (req *request.Request, output *UploadSSHPublicKeyOutput) { op := &request.Operation{ Name: opUploadSSHPublicKey, @@ -4221,7 +7095,28 @@ func (c *IAM) UploadSSHPublicKey(input *UploadSSHPublicKeyInput) (*UploadSSHPubl const opUploadServerCertificate = "UploadServerCertificate" -// UploadServerCertificateRequest generates a request for the UploadServerCertificate operation. +// UploadServerCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadServerCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
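+//
+// As an illustrative aside (not part of the generated documentation), uploading a
+// certificate with the plain UploadServerCertificate method might look like the
+// following sketch, assuming svc is an existing *IAM client and that certBody,
+// privateKey, and chain hold PEM-encoded strings read elsewhere:
+//
+//    resp, err := svc.UploadServerCertificate(&UploadServerCertificateInput{
+//        ServerCertificateName: aws.String("ProdCert"),
+//        CertificateBody:       aws.String(certBody),
+//        PrivateKey:            aws.String(privateKey),
+//        CertificateChain:      aws.String(chain),
+//    })
+//    if err == nil {
+//        fmt.Println(resp)
+//    }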
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadServerCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadServerCertificateRequest method. +// req, resp := client.UploadServerCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput) (req *request.Request, output *UploadServerCertificateOutput) { op := &request.Operation{ Name: opUploadServerCertificate, @@ -4252,7 +7147,7 @@ func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput // see Limitations on IAM Entities and Objects (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) // in the IAM User Guide. // -// Because the body of the public key certificate, private key, and the certificate +// Because the body of the public key certificate, private key, and the certificate // chain can be large, you should use POST rather than GET when calling UploadServerCertificate. // For information about setting up signatures and authorization through the // API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) @@ -4267,7 +7162,28 @@ func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*Upl const opUploadSigningCertificate = "UploadSigningCertificate" -// UploadSigningCertificateRequest generates a request for the UploadSigningCertificate operation. +// UploadSigningCertificateRequest generates a "aws/request.Request" representing the +// client's request for the UploadSigningCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadSigningCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadSigningCertificateRequest method. +// req, resp := client.UploadSigningCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) (req *request.Request, output *UploadSigningCertificateOutput) { op := &request.Operation{ Name: opUploadSigningCertificate, @@ -4286,22 +7202,23 @@ func (c *IAM) UploadSigningCertificateRequest(input *UploadSigningCertificateInp } // Uploads an X.509 signing certificate and associates it with the specified -// user. Some AWS services use X.509 signing certificates to validate requests +// IAM user. Some AWS services use X.509 signing certificates to validate requests // that are signed with a corresponding private key. 
When you upload the certificate, // its default status is Active. // -// If the UserName field is not specified, the user name is determined implicitly -// based on the AWS access key ID used to sign the request. Because this action -// works for access keys under the AWS account, you can use this action to manage -// root credentials even if the AWS account has no associated users. +// If the UserName field is not specified, the IAM user name is determined +// implicitly based on the AWS access key ID used to sign the request. Because +// this action works for access keys under the AWS account, you can use this +// action to manage root credentials even if the AWS account has no associated +// users. // -// Because the body of a X.509 certificate can be large, you should use POST +// Because the body of a X.509 certificate can be large, you should use POST // rather than GET when calling UploadSigningCertificate. For information about // setting up signatures and authorization through the API, go to Signing AWS // API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) // in the AWS General Reference. For general information about using the Query // API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) -// in the Using IAMguide. +// in the IAM User Guide. func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*UploadSigningCertificateOutput, error) { req, out := c.UploadSigningCertificateRequest(input) err := req.Send() @@ -4313,7 +7230,7 @@ func (c *IAM) UploadSigningCertificate(input *UploadSigningCertificateInput) (*U // This data type is used as a response element in the CreateAccessKey and // ListAccessKeys actions. // -// The SecretAccessKey value is returned only in response to CreateAccessKey. +// The SecretAccessKey value is returned only in response to CreateAccessKey. // You can get a secret access key only when you first create an access key; // you cannot recover the secret access key later. If you lose a secret access // key, you must create a new access key. @@ -4434,12 +7351,13 @@ func (s AccessKeyMetadata) GoString() string { type AddClientIDToOpenIDConnectProviderInput struct { _ struct{} `type:"structure"` - // The client ID (also known as audience) to add to the IAM OpenID Connect provider. + // The client ID (also known as audience) to add to the IAM OpenID Connect provider + // resource. ClientID *string `min:"1" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider - // to add the client ID to. You can get a list of OIDC provider ARNs by using - // the ListOpenIDConnectProviders action. + // resource to add the client ID to. You can get a list of OIDC provider ARNs + // by using the ListOpenIDConnectProviders action. OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` } @@ -4493,9 +7411,17 @@ type AddRoleToInstanceProfileInput struct { _ struct{} `type:"structure"` // The name of the instance profile to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- InstanceProfileName *string `min:"1" type:"string" required:"true"` // The name of the role to add. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -4549,9 +7475,17 @@ type AddUserToGroupInput struct { _ struct{} `type:"structure"` // The name of the group to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The name of the user to add. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -4605,12 +7539,16 @@ type AttachGroupPolicyInput struct { _ struct{} `type:"structure"` // The name (friendly name, not ARN) of the group to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -4664,14 +7602,18 @@ func (s AttachGroupPolicyOutput) GoString() string { type AttachRolePolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` // The name (friendly name, not ARN) of the role to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -4724,14 +7666,18 @@ func (s AttachRolePolicyOutput) GoString() string { type AttachUserPolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // The Amazon Resource Name (ARN) of the IAM policy you want to attach. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` - // The name (friendly name, not ARN) of the user to attach the policy to. + // The name (friendly name, not ARN) of the IAM user to attach the policy to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -4820,6 +7766,15 @@ type ChangePasswordInput struct { // The new password. The new password must conform to the AWS account's password // policy, if one exists. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of almost any printable ASCII character + // from the space (\u0020) through the end of the ASCII character range (\u00FF). + // You can also include the tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) characters. Although any of these characters are valid in a password, + // note that many tools, such as the AWS Management Console, might restrict + // the ability to enter certain characters because they have special meaning + // within that tool. NewPassword *string `min:"1" type:"string" required:"true"` // The IAM user's current password. @@ -4877,7 +7832,8 @@ func (s ChangePasswordOutput) GoString() string { // multiple values) to use in the simulation. This information is used when // evaluating the Condition elements of the input policies. // -// This data type is used as an input parameter to SimulatePolicy. +// This data type is used as an input parameter to SimulateCustomPolicy and +// SimulateCustomPolicy . type ContextEntry struct { _ struct{} `type:"structure"` @@ -4921,7 +7877,11 @@ func (s *ContextEntry) Validate() error { type CreateAccessKeyInput struct { _ struct{} `type:"structure"` - // The user name that the new key will belong to. + // The name of the IAM user that the new key will belong to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -4952,7 +7912,7 @@ func (s *CreateAccessKeyInput) Validate() error { type CreateAccessKeyOutput struct { _ struct{} `type:"structure"` - // Information about the access key. + // A structure with details about the access key. AccessKey *AccessKey `type:"structure" required:"true"` } @@ -4970,6 +7930,11 @@ type CreateAccountAliasInput struct { _ struct{} `type:"structure"` // The account alias to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. 
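+	//
+	// For example (illustrative only), "example-corp" and "my-team-2" satisfy this
+	// pattern, while "-example" and "example--corp" do not.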
AccountAlias *string `min:"3" type:"string" required:"true"` } @@ -5017,14 +7982,24 @@ type CreateGroupInput struct { _ struct{} `type:"structure"` // The name of the group to create. Do not include the path in this value. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The path to the group. For more information about paths, see IAM Identifiers // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `min:"1" type:"string"` } @@ -5061,7 +8036,7 @@ func (s *CreateGroupInput) Validate() error { type CreateGroupOutput struct { _ struct{} `type:"structure"` - // Information about the group. + // A structure containing details about the new group. Group *Group `type:"structure" required:"true"` } @@ -5079,14 +8054,24 @@ type CreateInstanceProfileInput struct { _ struct{} `type:"structure"` // The name of the instance profile to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- InstanceProfileName *string `min:"1" type:"string" required:"true"` // The path to the instance profile. For more information about paths, see IAM // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `min:"1" type:"string"` } @@ -5123,7 +8108,7 @@ func (s *CreateInstanceProfileInput) Validate() error { type CreateInstanceProfileOutput struct { _ struct{} `type:"structure"` - // Information about the instance profile. + // A structure containing details about the new instance profile. InstanceProfile *InstanceProfile `type:"structure" required:"true"` } @@ -5141,12 +8126,26 @@ type CreateLoginProfileInput struct { _ struct{} `type:"structure"` // The new password for the user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of almost any printable ASCII character + // from the space (\u0020) through the end of the ASCII character range (\u00FF). 
+ // You can also include the tab (\u0009), line feed (\u000A), and carriage return + // (\u000D) characters. Although any of these characters are valid in a password, + // note that many tools, such as the AWS Management Console, might restrict + // the ability to enter certain characters because they have special meaning + // within that tool. Password *string `min:"1" type:"string" required:"true"` // Specifies whether the user is required to set a new password on next sign-in. PasswordResetRequired *bool `type:"boolean"` - // The name of the user to create a password for. + // The name of the IAM user to create a password for. The user must already + // exist. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -5186,7 +8185,7 @@ func (s *CreateLoginProfileInput) Validate() error { type CreateLoginProfileOutput struct { _ struct{} `type:"structure"` - // The user name and password create date. + // A structure containing the user name and password create date. LoginProfile *LoginProfile `type:"structure" required:"true"` } @@ -5282,8 +8281,8 @@ func (s *CreateOpenIDConnectProviderInput) Validate() error { type CreateOpenIDConnectProviderOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider that was - // created. For more information, see OpenIDConnectProviderListEntry. + // The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that + // is created. For more information, see OpenIDConnectProviderListEntry. OpenIDConnectProviderArn *string `min:"20" type:"string"` } @@ -5316,12 +8315,29 @@ type CreatePolicyInput struct { // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `type:"string"` - // The policy document. + // The JSON policy document that you want to use as the content for the new + // policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` - // The name of the policy document. + // The friendly name of the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` } @@ -5361,7 +8377,7 @@ func (s *CreatePolicyInput) Validate() error { type CreatePolicyOutput struct { _ struct{} `type:"structure"` - // Information about the policy. 
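In the same spirit, a small hypothetical example of passing a JSON policy document to CreatePolicy as described above; the policy name and statement are invented for illustration, and the client setup assumes configured credentials.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New()) // assumes configured credentials and region

	// A minimal policy document; the name and statement are made up.
	doc := `{
	  "Version": "2012-10-17",
	  "Statement": [{"Effect": "Allow", "Action": "s3:ListAllMyBuckets", "Resource": "*"}]
	}`

	out, err := svc.CreatePolicy(&iam.CreatePolicyInput{
		PolicyName:     aws.String("example-list-buckets"),
		PolicyDocument: aws.String(doc),
	})
	if err != nil {
		log.Fatalf("CreatePolicy failed: %v", err)
	}
	fmt.Println("created policy:", aws.StringValue(out.Policy.Arn))
}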
+ // A structure containing details about the new policy. Policy *Policy `type:"structure"` } @@ -5378,14 +8394,22 @@ func (s CreatePolicyOutput) GoString() string { type CreatePolicyVersionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy to which you want to add + // a new version. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` - // The policy document. + // The JSON policy document that you want to use as the content for this new + // version of the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` // Specifies whether to set this version as the policy's default version. @@ -5436,7 +8460,7 @@ func (s *CreatePolicyVersionInput) Validate() error { type CreatePolicyVersionOutput struct { _ struct{} `type:"structure"` - // Information about the policy version. + // A structure containing details about the new policy version. PolicyVersion *PolicyVersion `type:"structure"` } @@ -5455,17 +8479,33 @@ type CreateRoleInput struct { // The trust relationship policy document that grants an entity permission to // assume the role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` // The path to the role. For more information about paths, see IAM Identifiers // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `min:"1" type:"string"` // The name of the role to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -5508,7 +8548,7 @@ func (s *CreateRoleInput) Validate() error { type CreateRoleOutput struct { _ struct{} `type:"structure"` - // Information about the role. + // A structure containing details about the new role. Role *Role `type:"structure" required:"true"` } @@ -5526,6 +8566,10 @@ type CreateSAMLProviderInput struct { _ struct{} `type:"structure"` // The name of the provider to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- Name *string `min:"1" type:"string" required:"true"` // An XML document generated by an identity provider (IdP) that supports SAML @@ -5575,7 +8619,7 @@ func (s *CreateSAMLProviderInput) Validate() error { type CreateSAMLProviderOutput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the SAML provider. + // The Amazon Resource Name (ARN) of the new SAML provider resource in IAM. SAMLProviderArn *string `min:"20" type:"string"` } @@ -5594,13 +8638,23 @@ type CreateUserInput struct { // The path for the user name. For more information about paths, see IAM Identifiers // (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `min:"1" type:"string"` // The name of the user to create. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -5637,7 +8691,7 @@ func (s *CreateUserInput) Validate() error { type CreateUserOutput struct { _ struct{} `type:"structure"` - // Information about the user. + // A structure with details about the new IAM user. User *User `type:"structure"` } @@ -5656,14 +8710,24 @@ type CreateVirtualMFADeviceInput struct { // The path for the virtual MFA device. For more information about paths, see // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash // (/). + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. Path *string `min:"1" type:"string"` // The name of the virtual MFA device. Use with path to uniquely identify a // virtual MFA device. 
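A sketch of how the CreateRole fields above might be populated with a trust policy; the role name, path, and EC2 service principal are illustrative only, and the *iam.IAM client is assumed to be constructed elsewhere.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// createExampleRole creates a role that EC2 instances can assume.
// All names and the trust document are placeholders.
func createExampleRole(svc *iam.IAM) error {
	trustPolicy := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Principal": {"Service": "ec2.amazonaws.com"},
	    "Action": "sts:AssumeRole"
	  }]
	}`
	out, err := svc.CreateRole(&iam.CreateRoleInput{
		RoleName:                 aws.String("example-ec2-role"),
		Path:                     aws.String("/application_abc/"),
		AssumeRolePolicyDocument: aws.String(trustPolicy),
	})
	if err != nil {
		return fmt.Errorf("CreateRole failed: %v", err)
	}
	fmt.Println("created role:", aws.StringValue(out.Role.Arn))
	return nil
}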
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` } @@ -5700,7 +8764,7 @@ func (s *CreateVirtualMFADeviceInput) Validate() error { type CreateVirtualMFADeviceOutput struct { _ struct{} `type:"structure"` - // A newly created virtual MFA device. + // A structure containing details about the new virtual MFA device. VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` } @@ -5719,9 +8783,17 @@ type DeactivateMFADeviceInput struct { // The serial number that uniquely identifies the MFA device. For virtual MFA // devices, the serial number is the device ARN. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =/:,.@- SerialNumber *string `min:"9" type:"string" required:"true"` // The name of the user whose MFA device you want to deactivate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -5776,9 +8848,17 @@ type DeleteAccessKeyInput struct { // The access key ID for the access key ID and secret access key you want to // delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. AccessKeyId *string `min:"16" type:"string" required:"true"` - // The name of the user whose key you want to delete. + // The name of the user whose access key pair you want to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -5829,6 +8909,11 @@ type DeleteAccountAliasInput struct { _ struct{} `type:"structure"` // The name of the account alias to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. AccountAlias *string `min:"3" type:"string" required:"true"` } @@ -5903,7 +8988,11 @@ func (s DeleteAccountPasswordPolicyOutput) GoString() string { type DeleteGroupInput struct { _ struct{} `type:"structure"` - // The name of the group to delete. + // The name of the IAM group to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` } @@ -5952,9 +9041,17 @@ type DeleteGroupPolicyInput struct { // The name (friendly name, not ARN) identifying the group that the policy is // embedded in. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The name identifying the policy document to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` } @@ -6008,6 +9105,10 @@ type DeleteInstanceProfileInput struct { _ struct{} `type:"structure"` // The name of the instance profile to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- InstanceProfileName *string `min:"1" type:"string" required:"true"` } @@ -6055,6 +9156,10 @@ type DeleteLoginProfileInput struct { _ struct{} `type:"structure"` // The name of the user whose password you want to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6101,9 +9206,9 @@ func (s DeleteLoginProfileOutput) GoString() string { type DeleteOpenIDConnectProviderInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider to delete. - // You can get a list of OpenID Connect provider ARNs by using the ListOpenIDConnectProviders - // action. + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource + // object to delete. You can get a list of OpenID Connect provider resource + // ARNs by using the ListOpenIDConnectProviders action. OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` } @@ -6150,10 +9255,10 @@ func (s DeleteOpenIDConnectProviderOutput) GoString() string { type DeletePolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to delete. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -6201,15 +9306,21 @@ func (s DeletePolicyOutput) GoString() string { type DeletePolicyVersionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy from which you want to delete + // a version. 
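Deleting an access key with the input documented above might look roughly like this; the user name is a placeholder and the key ID is AWS's documentation example value.

package iamexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// deleteExampleAccessKey removes one access key from a user.
// Both identifiers below are placeholders.
func deleteExampleAccessKey(svc *iam.IAM) error {
	_, err := svc.DeleteAccessKey(&iam.DeleteAccessKeyInput{
		UserName:    aws.String("example-user"),
		AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE"),
	})
	return err
}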
// - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` // The policy version to delete. // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // // For more information about managed policy versions, see Versioning for Managed // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) // in the IAM User Guide. @@ -6263,6 +9374,10 @@ type DeleteRoleInput struct { _ struct{} `type:"structure"` // The name of the role to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -6309,11 +9424,19 @@ func (s DeleteRoleOutput) GoString() string { type DeleteRolePolicyInput struct { _ struct{} `type:"structure"` - // The name identifying the policy document to delete. + // The name of the inline policy to delete from the specified IAM role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name (friendly name, not ARN) identifying the role that the policy is // embedded in. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -6414,9 +9537,17 @@ type DeleteSSHPublicKeyInput struct { _ struct{} `type:"structure"` // The unique identifier for the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. SSHPublicKeyId *string `min:"20" type:"string" required:"true"` // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6470,6 +9601,10 @@ type DeleteServerCertificateInput struct { _ struct{} `type:"structure"` // The name of the server certificate you want to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- ServerCertificateName *string `min:"1" type:"string" required:"true"` } @@ -6517,9 +9652,17 @@ type DeleteSigningCertificateInput struct { _ struct{} `type:"structure"` // The ID of the signing certificate to delete. + // + // The format of this parameter, as described by its regex (http://wikipedia.org/wiki/regex) + // pattern, is a string of characters that can be upper- or lower-cased letters + // or digits. CertificateId *string `min:"24" type:"string" required:"true"` // The name of the user the signing certificate belongs to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -6570,6 +9713,10 @@ type DeleteUserInput struct { _ struct{} `type:"structure"` // The name of the user to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6617,10 +9764,18 @@ type DeleteUserPolicyInput struct { _ struct{} `type:"structure"` // The name identifying the policy document to delete. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name (friendly name, not ARN) identifying the user that the policy is // embedded in. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6675,6 +9830,10 @@ type DeleteVirtualMFADeviceInput struct { // The serial number that uniquely identifies the MFA device. For virtual MFA // devices, the serial number is the same as the ARN. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =/:,.@- SerialNumber *string `min:"9" type:"string" required:"true"` } @@ -6721,13 +9880,17 @@ func (s DeleteVirtualMFADeviceOutput) GoString() string { type DetachGroupPolicyInput struct { _ struct{} `type:"structure"` - // The name (friendly name, not ARN) of the group to detach the policy from. + // The name (friendly name, not ARN) of the IAM group to detach the policy from. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. 
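A brief hypothetical call to DeletePolicyVersion, using the 'v' plus digits VersionId form noted above; the policy ARN is a placeholder.

package iamexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// deleteOldPolicyVersion deletes a non-default version of a managed policy.
// The ARN is a placeholder; the version ID follows the 'v' + digits form.
// The policy's default version cannot be removed this way.
func deleteOldPolicyVersion(svc *iam.IAM) error {
	_, err := svc.DeletePolicyVersion(&iam.DeletePolicyVersionInput{
		PolicyArn: aws.String("arn:aws:iam::123456789012:policy/example-policy"),
		VersionId: aws.String("v2"),
	})
	return err
}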
// - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -6781,14 +9944,18 @@ func (s DetachGroupPolicyOutput) GoString() string { type DetachRolePolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` - // The name (friendly name, not ARN) of the role to detach the policy from. + // The name (friendly name, not ARN) of the IAM role to detach the policy from. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -6841,14 +10008,18 @@ func (s DetachRolePolicyOutput) GoString() string { type DetachUserPolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` - // The name (friendly name, not ARN) of the user to detach the policy from. + // The name (friendly name, not ARN) of the IAM user to detach the policy from. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6902,16 +10073,28 @@ type EnableMFADeviceInput struct { _ struct{} `type:"structure"` // An authentication code emitted by the device. + // + // The format for this parameter is a string of 6 digits. AuthenticationCode1 *string `min:"6" type:"string" required:"true"` // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a string of 6 digits. AuthenticationCode2 *string `min:"6" type:"string" required:"true"` // The serial number that uniquely identifies the MFA device. For virtual MFA // devices, the serial number is the device ARN. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =/:,.@- SerialNumber *string `min:"9" type:"string" required:"true"` - // The name of the user for whom you want to enable the MFA device. + // The name of the IAM user for whom you want to enable the MFA device. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -6975,7 +10158,8 @@ func (s EnableMFADeviceOutput) GoString() string { // Contains the results of a simulation. // -// This data type is used by the return parameter of SimulatePolicy. +// This data type is used by the return parameter of SimulateCustomPolicy +// and SimulatePrincipalPolicy . type EvaluationResult struct { _ struct{} `type:"structure"` @@ -7010,11 +10194,6 @@ type EvaluationResult struct { // missing context values are instead included under the ResourceSpecificResults // section. To discover the context keys used by a set of policies, you can // call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy. - // - // If the response includes any keys in this list, then the reported results - // might be untrustworthy because the simulation could not completely evaluate - // all of the conditions specified in the policies that would occur in a real - // world request. MissingContextValues []*string `type:"list"` // The individual results of the simulation of the API action specified in EvalActionName @@ -7071,6 +10250,10 @@ type GetAccessKeyLastUsedInput struct { _ struct{} `type:"structure"` // The identifier of an access key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. AccessKeyId *string `min:"16" type:"string" required:"true"` } @@ -7126,8 +10309,13 @@ func (s GetAccessKeyLastUsedOutput) GoString() string { type GetAccountAuthorizationDetailsInput struct { _ struct{} `type:"structure"` - // A list of entity types (user, group, role, local managed policy, or AWS managed - // policy) for filtering the results. + // A list of entity types used to filter the results. Only the entities that + // match the types you specify are included in the output. Use the value LocalManagedPolicy + // to include customer managed policies. + // + // The format for this parameter is a comma-separated (if more than one) list + // of strings. Each string value in the list must be one of the valid values + // listed below. Filter []*string `type:"list"` // Use this parameter only when paginating results and only after you receive @@ -7268,147 +10456,6 @@ type GetAccountSummaryOutput struct { // A set of key value pairs containing information about IAM entity usage and // IAM quotas. - // - // SummaryMap contains the following keys: AccessKeysPerUserQuota - // - // The maximum number of active access keys allowed for each IAM user. - // - // AccountAccessKeysPresent - // - // This value is 1 if the AWS account (root) has an access key, otherwise it - // is 0. - // - // AccountMFAEnabled - // - // This value is 1 if the AWS account (root) has an MFA device assigned, otherwise - // it is 0. 
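The two consecutive six-digit authentication codes described above would be supplied along these lines; the user name and virtual device ARN are placeholders, and the codes come from the MFA device itself.

package iamexamples

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// enableVirtualMFA enables a virtual MFA device for a user using two
// consecutive codes read from the device. Names and the ARN are placeholders.
func enableVirtualMFA(svc *iam.IAM, code1, code2 string) error {
	_, err := svc.EnableMFADevice(&iam.EnableMFADeviceInput{
		UserName:            aws.String("example-user"),
		SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		AuthenticationCode1: aws.String(code1),
		AuthenticationCode2: aws.String(code2),
	})
	return err
}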
- // - // AccountSigningCertificatesPresent - // - // This value is 1 if the AWS account (root) has a signing certificate, otherwise - // it is 0. - // - // AssumeRolePolicySizeQuota - // - // The maximum allowed size for assume role policy documents (trust policies), - // in non-whitespace characters. - // - // AttachedPoliciesPerGroupQuota - // - // The maximum number of managed policies that can be attached to an IAM group. - // - // AttachedPoliciesPerRoleQuota - // - // The maximum number of managed policies that can be attached to an IAM role. - // - // AttachedPoliciesPerUserQuota - // - // The maximum number of managed policies that can be attached to an IAM user. - // - // GroupPolicySizeQuota - // - // The maximum allowed size for the aggregate of all inline policies embedded - // in an IAM group, in non-whitespace characters. - // - // Groups - // - // The number of IAM groups in the AWS account. - // - // GroupsPerUserQuota - // - // The maximum number of IAM groups each IAM user can belong to. - // - // GroupsQuota - // - // The maximum number of IAM groups allowed in the AWS account. - // - // InstanceProfiles - // - // The number of instance profiles in the AWS account. - // - // InstanceProfilesQuota - // - // The maximum number of instance profiles allowed in the AWS account. - // - // MFADevices - // - // The number of MFA devices in the AWS account, including those assigned and - // unassigned. - // - // MFADevicesInUse - // - // The number of MFA devices that have been assigned to an IAM user or to the - // AWS account (root). - // - // Policies - // - // The number of customer managed policies in the AWS account. - // - // PoliciesQuota - // - // The maximum number of customer managed policies allowed in the AWS account. - // - // PolicySizeQuota - // - // The maximum allowed size of a customer managed policy, in non-whitespace - // characters. - // - // PolicyVersionsInUse - // - // The number of managed policies that are attached to IAM users, groups, or - // roles in the AWS account. - // - // PolicyVersionsInUseQuota - // - // The maximum number of managed policies that can be attached to IAM users, - // groups, or roles in the AWS account. - // - // Providers - // - // The number of identity providers in the AWS account. - // - // RolePolicySizeQuota - // - // The maximum allowed size for the aggregate of all inline policies (access - // policies, not the trust policy) embedded in an IAM role, in non-whitespace - // characters. - // - // Roles - // - // The number of IAM roles in the AWS account. - // - // RolesQuota - // - // The maximum number of IAM roles allowed in the AWS account. - // - // ServerCertificates - // - // The number of server certificates in the AWS account. - // - // ServerCertificatesQuota - // - // The maximum number of server certificates allowed in the AWS account. - // - // SigningCertificatesPerUserQuota - // - // The maximum number of X.509 signing certificates allowed for each IAM user. - // - // UserPolicySizeQuota - // - // The maximum allowed size for the aggregate of all inline policies embedded - // in an IAM user, in non-whitespace characters. - // - // Users - // - // The number of IAM users in the AWS account. - // - // UsersQuota - // - // The maximum number of IAM users allowed in the AWS account. - // - // VersionsPerPolicyQuota - // - // The maximum number of policy versions allowed for each managed policy. 
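The quota and usage names enumerated in the comment block removed above (Users, UsersQuota, and so on) are read back from GetAccountSummary roughly as follows; a minimal sketch assuming a configured client.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Assumes credentials and a region are available to the SDK.
	svc := iam.New(session.New())

	out, err := svc.GetAccountSummary(&iam.GetAccountSummaryInput{})
	if err != nil {
		log.Fatalf("GetAccountSummary failed: %v", err)
	}
	// SummaryMap is keyed by the quota and usage names, e.g. Users/UsersQuota.
	fmt.Printf("IAM users: %d of %d allowed\n",
		aws.Int64Value(out.SummaryMap["Users"]),
		aws.Int64Value(out.SummaryMap["UsersQuota"]))
}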
SummaryMap map[string]*int64 `type:"map"` } @@ -7425,9 +10472,15 @@ func (s GetAccountSummaryOutput) GoString() string { type GetContextKeysForCustomPolicyInput struct { _ struct{} `type:"structure"` - // A list of policies for which you want list of context keys used in Condition - // elements. Each document is specified as a string containing the complete, - // valid JSON text of an IAM policy. + // A list of policies for which you want the list of context keys referenced + // in those policies. Each document is specified as a string containing the + // complete, valid JSON text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyInputList []*string `type:"list" required:"true"` } @@ -7459,8 +10512,7 @@ func (s *GetContextKeysForCustomPolicyInput) Validate() error { type GetContextKeysForPolicyResponse struct { _ struct{} `type:"structure"` - // The list of context keys that are used in the Condition elements of the input - // policies. + // The list of context keys that are referenced in the input policies. ContextKeyNames []*string `type:"list"` } @@ -7477,8 +10529,14 @@ func (s GetContextKeysForPolicyResponse) GoString() string { type GetContextKeysForPrincipalPolicyInput struct { _ struct{} `type:"structure"` - // A optional list of additional policies for which you want list of context - // keys used in Condition elements. + // An optional list of additional policies for which you want the list of context + // keys that are referenced. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyInputList []*string `type:"list"` // The ARN of a user, group, or role whose policies contain the context keys @@ -7488,6 +10546,10 @@ type GetContextKeysForPrincipalPolicyInput struct { // only those context keys that are found in policies attached to that entity. // Note that all parameters are shown in unencoded form here for clarity, but // must be URL encoded to be included as a part of a real HTML request. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. PolicySourceArn *string `min:"20" type:"string" required:"true"` } @@ -7562,6 +10624,10 @@ type GetGroupInput struct { _ struct{} `type:"structure"` // The name of the group. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // Use this parameter only when paginating results and only after you receive @@ -7618,7 +10684,7 @@ func (s *GetGroupInput) Validate() error { type GetGroupOutput struct { _ struct{} `type:"structure"` - // Information about the group. 
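A minimal sketch of GetContextKeysForCustomPolicy as described above, with a single made-up policy document that references one condition key.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// printContextKeys lists the condition context keys referenced by a policy.
// The policy document is a made-up example.
func printContextKeys(svc *iam.IAM) error {
	doc := `{
	  "Version": "2012-10-17",
	  "Statement": [{
	    "Effect": "Allow",
	    "Action": "s3:GetObject",
	    "Resource": "*",
	    "Condition": {"Bool": {"aws:SecureTransport": "true"}}
	  }]
	}`
	out, err := svc.GetContextKeysForCustomPolicy(&iam.GetContextKeysForCustomPolicyInput{
		PolicyInputList: []*string{aws.String(doc)},
	})
	if err != nil {
		return err
	}
	for _, k := range out.ContextKeyNames {
		fmt.Println("context key:", aws.StringValue(k))
	}
	return nil
}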
+ // A structure that contains details about the group. Group *Group `type:"structure" required:"true"` // A flag that indicates whether there are more items to return. If your results @@ -7651,9 +10717,17 @@ type GetGroupPolicyInput struct { _ struct{} `type:"structure"` // The name of the group the policy is associated with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` } @@ -7717,6 +10791,10 @@ type GetInstanceProfileInput struct { _ struct{} `type:"structure"` // The name of the instance profile to get information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- InstanceProfileName *string `min:"1" type:"string" required:"true"` } @@ -7750,7 +10828,7 @@ func (s *GetInstanceProfileInput) Validate() error { type GetInstanceProfileOutput struct { _ struct{} `type:"structure"` - // Information about the instance profile. + // A structure containing details about the instance profile. InstanceProfile *InstanceProfile `type:"structure" required:"true"` } @@ -7768,6 +10846,10 @@ type GetLoginProfileInput struct { _ struct{} `type:"structure"` // The name of the user whose login profile you want to retrieve. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -7801,7 +10883,7 @@ func (s *GetLoginProfileInput) Validate() error { type GetLoginProfileOutput struct { _ struct{} `type:"structure"` - // The user name and password create date for the user. + // A structure containing the user name and password create date for the user. LoginProfile *LoginProfile `type:"structure" required:"true"` } @@ -7818,9 +10900,13 @@ func (s GetLoginProfileOutput) GoString() string { type GetOpenIDConnectProviderInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider - // to get information for. You can get a list of OIDC provider ARNs by using - // the ListOpenIDConnectProviders action. + // The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM + // to get information for. You can get a list of OIDC provider resource ARNs + // by using the ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
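GetGroup's Marker and IsTruncated fields drive pagination as described above; one plausible loop looks like this, with the group name supplied by the caller.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// printGroupMembers pages through all users in a group using Marker/IsTruncated.
func printGroupMembers(svc *iam.IAM, groupName string) error {
	input := &iam.GetGroupInput{GroupName: aws.String(groupName)}
	for {
		out, err := svc.GetGroup(input)
		if err != nil {
			return err
		}
		for _, u := range out.Users {
			fmt.Println(aws.StringValue(u.UserName))
		}
		if !aws.BoolValue(out.IsTruncated) {
			return nil
		}
		// Continue from where the previous page left off.
		input.Marker = out.Marker
	}
}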
OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` } @@ -7855,19 +10941,19 @@ type GetOpenIDConnectProviderOutput struct { _ struct{} `type:"structure"` // A list of client IDs (also known as audiences) that are associated with the - // specified IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + // specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. ClientIDList []*string `type:"list"` - // The date and time when the IAM OpenID Connect provider entity was created + // The date and time when the IAM OIDC provider resource object was created // in the AWS account. CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` // A list of certificate thumbprints that are associated with the specified - // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + // IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. ThumbprintList []*string `type:"list"` - // The URL that the IAM OpenID Connect provider is associated with. For more - // information, see CreateOpenIDConnectProvider. + // The URL that the IAM OIDC provider resource object is associated with. For + // more information, see CreateOpenIDConnectProvider. Url *string `min:"1" type:"string"` } @@ -7884,10 +10970,11 @@ func (s GetOpenIDConnectProviderOutput) GoString() string { type GetPolicyInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -7922,7 +11009,7 @@ func (s *GetPolicyInput) Validate() error { type GetPolicyOutput struct { _ struct{} `type:"structure"` - // Information about the policy. + // A structure containing details about the policy. Policy *Policy `type:"structure"` } @@ -7939,14 +11026,20 @@ func (s GetPolicyOutput) GoString() string { type GetPolicyVersionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` // Identifies the policy version to retrieve. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. 
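Retrieving a managed policy and then its default version, as a hedged sketch tying GetPolicy and GetPolicyVersion together; the ARN is supplied by the caller.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// printDefaultPolicyDocument fetches a managed policy, then its default
// version. The service returns the document URL-encoded.
func printDefaultPolicyDocument(svc *iam.IAM, policyArn string) error {
	pol, err := svc.GetPolicy(&iam.GetPolicyInput{PolicyArn: aws.String(policyArn)})
	if err != nil {
		return err
	}
	ver, err := svc.GetPolicyVersion(&iam.GetPolicyVersionInput{
		PolicyArn: aws.String(policyArn),
		VersionId: pol.Policy.DefaultVersionId, // e.g. "v1"
	})
	if err != nil {
		return err
	}
	fmt.Println(aws.StringValue(ver.PolicyVersion.Document))
	return nil
}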
VersionId *string `type:"string" required:"true"` } @@ -7983,11 +11076,7 @@ func (s *GetPolicyVersionInput) Validate() error { type GetPolicyVersionOutput struct { _ struct{} `type:"structure"` - // Information about the policy version. - // - // For more information about managed policy versions, see Versioning for Managed - // Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) - // in the IAM User Guide. + // A structure containing details about the policy version. PolicyVersion *PolicyVersion `type:"structure"` } @@ -8004,7 +11093,11 @@ func (s GetPolicyVersionOutput) GoString() string { type GetRoleInput struct { _ struct{} `type:"structure"` - // The name of the role to get information about. + // The name of the IAM role to get information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -8038,7 +11131,7 @@ func (s *GetRoleInput) Validate() error { type GetRoleOutput struct { _ struct{} `type:"structure"` - // Information about the role. + // A structure containing details about the IAM role. Role *Role `type:"structure" required:"true"` } @@ -8056,9 +11149,17 @@ type GetRolePolicyInput struct { _ struct{} `type:"structure"` // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name of the role associated with the policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -8121,7 +11222,12 @@ func (s GetRolePolicyOutput) GoString() string { type GetSAMLProviderInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the SAML provider to get information about. + // The Amazon Resource Name (ARN) of the SAML provider resource object in IAM + // to get information about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. SAMLProviderArn *string `min:"20" type:"string" required:"true"` } @@ -8184,9 +11290,17 @@ type GetSSHPublicKeyInput struct { Encoding *string `type:"string" required:"true" enum:"encodingType"` // The unique identifier for the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. SSHPublicKeyId *string `min:"20" type:"string" required:"true"` // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -8229,7 +11343,7 @@ func (s *GetSSHPublicKeyInput) Validate() error { type GetSSHPublicKeyOutput struct { _ struct{} `type:"structure"` - // Information about the SSH public key. + // A structure containing details about the SSH public key. SSHPublicKey *SSHPublicKey `type:"structure"` } @@ -8247,6 +11361,10 @@ type GetServerCertificateInput struct { _ struct{} `type:"structure"` // The name of the server certificate you want to retrieve information about. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- ServerCertificateName *string `min:"1" type:"string" required:"true"` } @@ -8280,7 +11398,7 @@ func (s *GetServerCertificateInput) Validate() error { type GetServerCertificateOutput struct { _ struct{} `type:"structure"` - // Information about the server certificate. + // A structure containing details about the server certificate. ServerCertificate *ServerCertificate `type:"structure" required:"true"` } @@ -8300,7 +11418,10 @@ type GetUserInput struct { // The name of the user to get information about. // // This parameter is optional. If it is not included, it defaults to the user - // making the request. + // making the request. The regex pattern (http://wikipedia.org/wiki/regex) for + // this parameter is a string of characters consisting of upper and lowercase + // alphanumeric characters with no spaces. You can also include any of the following + // characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -8331,7 +11452,7 @@ func (s *GetUserInput) Validate() error { type GetUserOutput struct { _ struct{} `type:"structure"` - // Information about the user. + // A structure containing details about the IAM user. User *User `type:"structure" required:"true"` } @@ -8349,9 +11470,17 @@ type GetUserPolicyInput struct { _ struct{} `type:"structure"` // The name of the policy document to get. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name of the user who the policy is associated with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -8413,9 +11542,13 @@ func (s GetUserPolicyOutput) GoString() string { // Contains information about an IAM group entity. // -// This data type is used as a response element in the following actions: +// This data type is used as a response element in the following actions: // -// CreateGroup GetGroup ListGroups +// CreateGroup +// +// GetGroup +// +// ListGroups type Group struct { _ struct{} `type:"structure"` @@ -8571,6 +11704,10 @@ type ListAccessKeysInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the user. 
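Because UserName is optional and defaults to the requesting user, as noted above, a "who am I" style call is possible; a small illustrative helper.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// printCurrentUser omits UserName so the call resolves to the user whose
// credentials sign the request.
func printCurrentUser(svc *iam.IAM) error {
	out, err := svc.GetUser(&iam.GetUserInput{})
	if err != nil {
		return err
	}
	fmt.Println("current user:", aws.StringValue(out.User.Arn))
	return nil
}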
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -8607,7 +11744,7 @@ func (s *ListAccessKeysInput) Validate() error { type ListAccessKeysOutput struct { _ struct{} `type:"structure"` - // A list of access key metadata. + // A list of objects containing metadata about the access keys. AccessKeyMetadata []*AccessKeyMetadata `type:"list" required:"true"` // A flag that indicates whether there are more items to return. If your results @@ -8716,6 +11853,10 @@ type ListAttachedGroupPoliciesInput struct { // The name (friendly name, not ARN) of the group to list attached policies // for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // Use this parameter only when paginating results and only after you receive @@ -8737,6 +11878,12 @@ type ListAttachedGroupPoliciesInput struct { // The path prefix for filtering the results. This parameter is optional. If // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. PathPrefix *string `type:"string"` } @@ -8824,9 +11971,19 @@ type ListAttachedRolePoliciesInput struct { // The path prefix for filtering the results. This parameter is optional. If // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. PathPrefix *string `type:"string"` // The name (friendly name, not ARN) of the role to list attached policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -8914,9 +12071,19 @@ type ListAttachedUserPoliciesInput struct { // The path prefix for filtering the results. This parameter is optional. If // it is not included, it defaults to a slash (/), listing all policies. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. 
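A sketch of paginating the access key metadata above with the generated ListAccessKeysPages helper rather than looping on Marker by hand; the user name comes from the caller.

package iamexamples

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/iam"
)

// printAccessKeys lists a user's access keys, letting the SDK drive the
// Marker/IsTruncated pagination.
func printAccessKeys(svc *iam.IAM, userName string) error {
	return svc.ListAccessKeysPages(&iam.ListAccessKeysInput{
		UserName: aws.String(userName),
	}, func(page *iam.ListAccessKeysOutput, lastPage bool) bool {
		for _, md := range page.AccessKeyMetadata {
			fmt.Printf("%s (%s)\n", aws.StringValue(md.AccessKeyId), aws.StringValue(md.Status))
		}
		return true // request the next page, if any
	})
}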
PathPrefix *string `type:"string"` // The name (friendly name, not ARN) of the user to list attached policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -8990,6 +12157,7 @@ type ListEntitiesForPolicyInput struct { // For example, when EntityFilter is Role, only the roles that are attached // to the specified policy are returned. This parameter is optional. If it is // not included, all attached entities (users, groups, and roles) are returned. + // The argument for this parameter must be one of the valid values listed below. EntityFilter *string `type:"string" enum:"EntityType"` // Use this parameter only when paginating results and only after you receive @@ -9011,12 +12179,18 @@ type ListEntitiesForPolicyInput struct { // The path prefix for filtering the results. This parameter is optional. If // it is not included, it defaults to a slash (/), listing all entities. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. PathPrefix *string `min:"1" type:"string"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -9072,13 +12246,13 @@ type ListEntitiesForPolicyOutput struct { // to use for the Marker parameter in a subsequent pagination request. Marker *string `min:"1" type:"string"` - // A list of groups that the policy is attached to. + // A list of IAM groups that the policy is attached to. PolicyGroups []*PolicyGroup `type:"list"` - // A list of roles that the policy is attached to. + // A list of IAM roles that the policy is attached to. PolicyRoles []*PolicyRole `type:"list"` - // A list of users that the policy is attached to. + // A list of IAM users that the policy is attached to. PolicyUsers []*PolicyUser `type:"list"` } @@ -9096,6 +12270,10 @@ type ListGroupPoliciesInput struct { _ struct{} `type:"structure"` // The name of the group to list policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // Use this parameter only when paginating results and only after you receive @@ -9199,6 +12377,10 @@ type ListGroupsForUserInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the user to list groups for. 
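// A minimal sketch, for illustration, of the EntityFilter parameter described
// above: listing only the roles attached to a managed policy. The policy ARN
// and region are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.ListEntitiesForPolicy(&iam.ListEntitiesForPolicyInput{
		PolicyArn:    aws.String("arn:aws:iam::123456789012:policy/example"), // placeholder ARN
		EntityFilter: aws.String("Role"),                                     // one of the EntityType values
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.PolicyRoles {
		fmt.Println(aws.StringValue(r.RoleName))
	}
}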
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -9287,8 +12469,13 @@ type ListGroupsInput struct { // The path prefix for filtering the results. For example, the prefix /division_abc/subdivision_xyz/ // gets all groups whose path starts with /division_abc/subdivision_xyz/. // - // This parameter is optional. If it is not included, it defaults to a slash - // (/), listing all groups. + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. PathPrefix *string `min:"1" type:"string"` } @@ -9372,6 +12559,10 @@ type ListInstanceProfilesForRoleInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the role to list instance profiles for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -9460,8 +12651,13 @@ type ListInstanceProfilesInput struct { // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ // gets all instance profiles whose path starts with /application_abc/component_xyz/. // - // This parameter is optional. If it is not included, it defaults to a slash - // (/), listing all instance profiles. + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all instance profiles. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. PathPrefix *string `min:"1" type:"string"` } @@ -9545,6 +12741,10 @@ type ListMFADevicesInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the user whose MFA devices you want to list. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -9625,7 +12825,7 @@ func (s ListOpenIDConnectProvidersInput) GoString() string { type ListOpenIDConnectProvidersOutput struct { _ struct{} `type:"structure"` - // The list of IAM OpenID Connect providers in the AWS account. + // The list of IAM OIDC provider resource objects defined in the AWS account. 
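// A minimal sketch, for illustration, of the path-prefix filtering described
// above for ListGroups. The prefix value is taken from the documentation's own
// example and the region is a placeholder.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.ListGroups(&iam.ListGroupsInput{
		PathPrefix: aws.String("/division_abc/subdivision_xyz/"), // list only groups under this path
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range out.Groups {
		fmt.Println(aws.StringValue(g.Path), aws.StringValue(g.GroupName))
	}
}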
OpenIDConnectProviderList []*OpenIDConnectProviderListEntry `type:"list"` } @@ -9662,12 +12862,17 @@ type ListPoliciesInput struct { // A flag to filter the results to only the attached policies. // // When OnlyAttached is true, the returned list contains only the policies - // that are attached to a user, group, or role. When OnlyAttached is false, + // that are attached to an IAM user, group, or role. When OnlyAttached is false, // or when the parameter is not included, all policies are returned. OnlyAttached *bool `type:"boolean"` // The path prefix for filtering the results. This parameter is optional. If - // it is not included, it defaults to a slash (/), listing all policies. + // it is not included, it defaults to a slash (/), listing all policies. The + // regex pattern (http://wikipedia.org/wiki/regex) for this parameter is a string + // of characters consisting of either a forward slash (/) by itself or a string + // that must begin and end with forward slashes, containing any ASCII character + // from the ! (\u0021) thru the DEL character (\u007F), including most punctuation + // characters, digits, and upper and lowercased letters. PathPrefix *string `type:"string"` // The scope to use for filtering the results. @@ -9756,10 +12961,10 @@ type ListPolicyVersionsInput struct { // service where to continue from. MaxItems *int64 `min:"1" type:"integer"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` } @@ -9851,6 +13056,10 @@ type ListRolePoliciesInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the role to list policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -9939,8 +13148,13 @@ type ListRolesInput struct { // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ // gets all roles whose path starts with /application_abc/component_xyz/. // - // This parameter is optional. If it is not included, it defaults to a slash - // (/), listing all roles. + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all roles. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. PathPrefix *string `min:"1" type:"string"` } @@ -10021,7 +13235,7 @@ func (s ListSAMLProvidersInput) GoString() string { type ListSAMLProvidersOutput struct { _ struct{} `type:"structure"` - // The list of SAML providers for this account. 
+ // The list of SAML provider resource objects defined in IAM for this AWS account. SAMLProviderList []*SAMLProviderListEntry `type:"list"` } @@ -10058,6 +13272,10 @@ type ListSSHPublicKeysInput struct { // The name of the IAM user to list SSH public keys for. If none is specified, // the UserName field is determined implicitly based on the AWS access key used // to sign the request. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -10106,7 +13324,7 @@ type ListSSHPublicKeysOutput struct { // to use for the Marker parameter in a subsequent pagination request. Marker *string `min:"1" type:"string"` - // A list of SSH public keys. + // A list of the SSH public keys assigned to IAM user. SSHPublicKeys []*SSHPublicKeyMetadata `type:"list"` } @@ -10143,8 +13361,13 @@ type ListServerCertificatesInput struct { // The path prefix for filtering the results. For example: /company/servercerts // would get all server certificates for which the path starts with /company/servercerts. // - // This parameter is optional. If it is not included, it defaults to a slash - // (/), listing all server certificates. + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all server certificates. The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. PathPrefix *string `min:"1" type:"string"` } @@ -10227,7 +13450,11 @@ type ListSigningCertificatesInput struct { // service where to continue from. MaxItems *int64 `min:"1" type:"integer"` - // The name of the user. + // The name of the IAM user whose signing certificates you want to examine. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -10311,6 +13538,10 @@ type ListUserPoliciesInput struct { MaxItems *int64 `min:"1" type:"integer"` // The name of the user to list policies for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -10399,8 +13630,13 @@ type ListUsersInput struct { // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, // which would get all user names whose path starts with /division_abc/subdivision_xyz/. // - // This parameter is optional. If it is not included, it defaults to a slash - // (/), listing all user names. + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all user names. 
The regex pattern (http://wikipedia.org/wiki/regex) + // for this parameter is a string of characters consisting of either a forward + // slash (/) by itself or a string that must begin and end with forward slashes, + // containing any ASCII character from the ! (\u0021) thru the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. PathPrefix *string `min:"1" type:"string"` } @@ -10466,7 +13702,7 @@ func (s ListUsersOutput) GoString() string { type ListVirtualMFADevicesInput struct { _ struct{} `type:"structure"` - // The status (unassigned or assigned) of the devices to list. If you do not + // The status (Unassigned or Assigned) of the devices to list. If you do not // specify an AssignmentStatus, the action defaults to Any which lists both // assigned and unassigned virtual MFA devices. AssignmentStatus *string `type:"string" enum:"assignmentStatusType"` @@ -10988,7 +14224,7 @@ func (s PolicyVersion) GoString() string { // Contains the row and column of a location of a Statement element in a policy // document. // -// This data type is used as a member of the Statement type. +// This data type is used as a member of the Statement type. type Position struct { _ struct{} `type:"structure"` @@ -11013,12 +14249,26 @@ type PutGroupPolicyInput struct { _ struct{} `type:"structure"` // The name of the group to associate the policy with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` // The name of the policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` } @@ -11078,12 +14328,26 @@ type PutRolePolicyInput struct { _ struct{} `type:"structure"` // The policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` // The name of the policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name of the role to associate the policy with. 
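// A minimal sketch, for illustration, of attaching an inline policy with
// PutRolePolicy, showing the kind of PolicyDocument string the pattern above
// describes. The role name, policy name, document, and region are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

const exampleDocument = `{
  "Version": "2012-10-17",
  "Statement": [
    {"Effect": "Allow", "Action": "s3:ListBucket", "Resource": "*"}
  ]
}`

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.PutRolePolicy(&iam.PutRolePolicyInput{
		RoleName:       aws.String("example-role"),
		PolicyName:     aws.String("example-inline-policy"),
		PolicyDocument: aws.String(exampleDocument),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("inline policy attached")
}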
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -11143,12 +14407,26 @@ type PutUserPolicyInput struct { _ struct{} `type:"structure"` // The policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` // The name of the policy document. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- PolicyName *string `min:"1" type:"string" required:"true"` // The name of the user to associate the policy with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -11207,13 +14485,17 @@ func (s PutUserPolicyOutput) GoString() string { type RemoveClientIDFromOpenIDConnectProviderInput struct { _ struct{} `type:"structure"` - // The client ID (also known as audience) to remove from the IAM OpenID Connect - // provider. For more information about client IDs, see CreateOpenIDConnectProvider. + // The client ID (also known as audience) to remove from the IAM OIDC provider + // resource. For more information about client IDs, see CreateOpenIDConnectProvider. ClientID *string `min:"1" type:"string" required:"true"` - // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider - // to remove the client ID from. You can get a list of OIDC provider ARNs by - // using the ListOpenIDConnectProviders action. + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove + // the client ID from. You can get a list of OIDC provider ARNs by using the + // ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` } @@ -11267,9 +14549,17 @@ type RemoveRoleFromInstanceProfileInput struct { _ struct{} `type:"structure"` // The name of the instance profile to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- InstanceProfileName *string `min:"1" type:"string" required:"true"` // The name of the role to remove. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -11323,9 +14613,17 @@ type RemoveUserFromGroupInput struct { _ struct{} `type:"structure"` // The name of the group to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` // The name of the user to remove. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -11428,15 +14726,27 @@ type ResyncMFADeviceInput struct { _ struct{} `type:"structure"` // An authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. AuthenticationCode1 *string `min:"6" type:"string" required:"true"` // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. AuthenticationCode2 *string `min:"6" type:"string" required:"true"` // Serial number that uniquely identifies the MFA device. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- SerialNumber *string `min:"9" type:"string" required:"true"` // The name of the user whose MFA device you want to resynchronize. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -11500,7 +14810,7 @@ func (s ResyncMFADeviceOutput) GoString() string { // Contains information about an IAM role. // -// This data type is used as a response element in the following actions: +// This data type is used as a response element in the following actions: // // CreateRole // @@ -11770,10 +15080,11 @@ func (s ServerCertificateMetadata) GoString() string { type SetDefaultPolicyVersionInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // The Amazon Resource Name (ARN) of the IAM policy whose default version you + // want to set. // - // For more information about ARNs, go to Amazon Resource Names (ARNs) and - // AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // in the AWS General Reference. PolicyArn *string `min:"20" type:"string" required:"true"` @@ -11870,8 +15181,8 @@ type SimulateCustomPolicyInput struct { // identifier, such as iam:CreateUser. ActionNames []*string `type:"list" required:"true"` - // The ARN of the user that you want to use as the simulated caller of the APIs. 
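// A minimal sketch, for illustration, of the MFA resynchronization flow
// described earlier above: two consecutive six-digit codes from the device are
// submitted together. The user name, serial number, codes, and region are
// placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.ResyncMFADevice(&iam.ResyncMFADeviceInput{
		UserName:            aws.String("example-user"),
		SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/example-user"), // placeholder
		AuthenticationCode1: aws.String("123456"),                                     // first six-digit code
		AuthenticationCode2: aws.String("789012"),                                     // next six-digit code
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("MFA device resynchronized")
}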
- // CallerArn is required if you include a ResourcePolicy so that the policy's + // The ARN of the IAM user that you want to use as the simulated caller of the + // APIs. CallerArn is required if you include a ResourcePolicy so that the policy's // Principal element has a value to use in evaluating the policy. // // You can specify only the ARN of an IAM user. You cannot specify the ARN @@ -11879,8 +15190,8 @@ type SimulateCustomPolicyInput struct { CallerArn *string `min:"1" type:"string"` // A list of context keys and corresponding values for the simulation to use. - // Whenever a context key is evaluated by a Condition element in one of the - // simulated IAM permission policies, the corresponding value is supplied. + // Whenever a context key is evaluated in one of the simulated IAM permission + // policies, the corresponding value is supplied. ContextEntries []*ContextEntry `type:"list"` // Use this parameter only when paginating results and only after you receive @@ -11908,6 +15219,12 @@ type SimulateCustomPolicyInput struct { // a call to GetFederationToken (http://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) // or one of the AssumeRole (http://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) // APIs to restrict what a user can do while using the temporary credentials. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyInputList []*string `type:"list" required:"true"` // A list of ARNs of AWS resources to include in the simulation. If this parameter @@ -11922,6 +15239,10 @@ type SimulateCustomPolicyInput struct { // // If you include a ResourcePolicy, then it must be applicable to all of the // resources included in the simulation or you receive an invalid input error. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. ResourceArns []*string `type:"list"` // Specifies the type of simulation to run. Different APIs that support resource-based @@ -11940,27 +15261,27 @@ type SimulateCustomPolicyInput struct { // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the AWS EC2 User Guide. // - // EC2-Classic-InstanceStore + // EC2-Classic-InstanceStore // // instance, image, security-group // - // EC2-Classic-EBS + // EC2-Classic-EBS // // instance, image, security-group, volume // - // EC2-VPC-InstanceStore + // EC2-VPC-InstanceStore // // instance, image, security-group, network-interface // - // EC2-VPC-InstanceStore-Subnet + // EC2-VPC-InstanceStore-Subnet // // instance, image, security-group, network-interface, subnet // - // EC2-VPC-EBS + // EC2-VPC-EBS // // instance, image, security-group, network-interface, volume // - // EC2-VPC-EBS-Subnet + // EC2-VPC-EBS-Subnet // // instance, image, security-group, network-interface, subnet, volume ResourceHandlingOption *string `min:"1" type:"string"` @@ -11979,6 +15300,12 @@ type SimulateCustomPolicyInput struct { // A resource-based policy to include in the simulation provided as a string. 
// Each resource in the simulation is treated as if it had this policy attached. // You can include only one resource-based policy in a simulation. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). ResourcePolicy *string `min:"1" type:"string"` } @@ -12075,24 +15402,28 @@ type SimulatePrincipalPolicyInput struct { // such as iam:CreateUser. ActionNames []*string `type:"list" required:"true"` - // The ARN of the user that you want to specify as the simulated caller of the - // APIs. If you do not specify a CallerArn, it defaults to the ARN of the user - // that you specify in PolicySourceArn, if you specified a user. If you include - // both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) + // The ARN of the IAM user that you want to specify as the simulated caller + // of the APIs. If you do not specify a CallerArn, it defaults to the ARN of + // the user that you specify in PolicySourceArn, if you specified a user. If + // you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David) // and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result // is that you simulate calling the APIs as Bob, as if Bob had David's policies. // // You can specify only the ARN of an IAM user. You cannot specify the ARN // of an assumed role, federated user, or a service principal. // - // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn + // CallerArn is required if you include a ResourcePolicy and the PolicySourceArn // is not the ARN for an IAM user. This is required so that the resource-based // policy's Principal element has a value to use in evaluating the policy. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. CallerArn *string `min:"1" type:"string"` // A list of context keys and corresponding values for the simulation to use. - // Whenever a context key is evaluated by a Condition element in one of the - // simulated policies, the corresponding value is supplied. + // Whenever a context key is evaluated in one of the simulated IAM permission + // policies, the corresponding value is supplied. ContextEntries []*ContextEntry `type:"list"` // Use this parameter only when paginating results and only after you receive @@ -12115,6 +15446,12 @@ type SimulatePrincipalPolicyInput struct { // An optional list of additional policy documents to include in the simulation. // Each document is specified as a string containing the complete, valid JSON // text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). 
PolicyInputList []*string `type:"list"` // The Amazon Resource Name (ARN) of a user, group, or role whose policies you @@ -12122,6 +15459,10 @@ type SimulatePrincipalPolicyInput struct { // the simulation includes all policies that are associated with that entity. // If you specify a user, the simulation also includes all policies that are // attached to any groups the user belongs to. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. PolicySourceArn *string `min:"20" type:"string" required:"true"` // A list of ARNs of AWS resources to include in the simulation. If this parameter @@ -12133,6 +15474,10 @@ type SimulatePrincipalPolicyInput struct { // The simulation does not automatically retrieve policies for the specified // resources. If you want to include a resource policy in the simulation, then // you must include the policy as a string in the ResourcePolicy parameter. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. ResourceArns []*string `type:"list"` // Specifies the type of simulation to run. Different APIs that support resource-based @@ -12151,27 +15496,27 @@ type SimulatePrincipalPolicyInput struct { // the EC2 scenario options, see Supported Platforms (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) // in the AWS EC2 User Guide. // - // EC2-Classic-InstanceStore + // EC2-Classic-InstanceStore // // instance, image, security-group // - // EC2-Classic-EBS + // EC2-Classic-EBS // // instance, image, security-group, volume // - // EC2-VPC-InstanceStore + // EC2-VPC-InstanceStore // // instance, image, security-group, network-interface // - // EC2-VPC-InstanceStore-Subnet + // EC2-VPC-InstanceStore-Subnet // // instance, image, security-group, network-interface, subnet // - // EC2-VPC-EBS + // EC2-VPC-EBS // // instance, image, security-group, network-interface, volume // - // EC2-VPC-EBS-Subnet + // EC2-VPC-EBS-Subnet // // instance, image, security-group, network-interface, subnet, volume ResourceHandlingOption *string `min:"1" type:"string"` @@ -12190,6 +15535,12 @@ type SimulatePrincipalPolicyInput struct { // A resource-based policy to include in the simulation provided as a string. // Each resource in the simulation is treated as if it had this policy attached. // You can include only one resource-based policy in a simulation. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). ResourcePolicy *string `min:"1" type:"string"` } @@ -12253,8 +15604,8 @@ func (s *SimulatePrincipalPolicyInput) Validate() error { // Contains a reference to a Statement element in a policy document that determines // the result of the simulation. // -// This data type is used by the MatchedStatements member of the EvaluationResult -// type. +// This data type is used by the MatchedStatements member of the EvaluationResult +// type. 
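// A minimal sketch, for illustration, of running the policy simulation
// documented above with SimulatePrincipalPolicy and iterating the returned
// evaluation results. The source ARN, action name, and region are placeholders.
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.SimulatePrincipalPolicy(&iam.SimulatePrincipalPolicyInput{
		PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/David"), // placeholder user ARN
		ActionNames:     []*string{aws.String("iam:CreateUser")},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range out.EvaluationResults {
		fmt.Println(aws.StringValue(r.EvalActionName), aws.StringValue(r.EvalDecision))
	}
}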
type Statement struct { _ struct{} `type:"structure"` @@ -12285,6 +15636,10 @@ type UpdateAccessKeyInput struct { _ struct{} `type:"structure"` // The access key ID of the secret access key you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. AccessKeyId *string `min:"16" type:"string" required:"true"` // The status you want to assign to the secret access key. Active means the @@ -12293,6 +15648,10 @@ type UpdateAccessKeyInput struct { Status *string `type:"string" required:"true" enum:"statusType"` // The name of the user whose key you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -12450,9 +15809,19 @@ type UpdateAssumeRolePolicyInput struct { _ struct{} `type:"structure"` // The policy that grants an entity permission to assume the role. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PolicyDocument *string `min:"1" type:"string" required:"true"` - // The name of the role to update. + // The name of the role to update with the new policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- RoleName *string `min:"1" type:"string" required:"true"` } @@ -12505,14 +15874,28 @@ func (s UpdateAssumeRolePolicyOutput) GoString() string { type UpdateGroupInput struct { _ struct{} `type:"structure"` - // Name of the group to update. If you're changing the name of the group, this - // is the original name. + // Name of the IAM group to update. If you're changing the name of the group, + // this is the original name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- GroupName *string `min:"1" type:"string" required:"true"` - // New name for the group. Only include this if changing the group's name. + // New name for the IAM group. Only include this if changing the group's name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- NewGroupName *string `min:"1" type:"string"` - // New path for the group. Only include this if changing the group's path. + // New path for the IAM group. Only include this if changing the group's path. 
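// A minimal sketch, for illustration, of the group rename/re-path flow
// described above with UpdateGroup; only the fields being changed need to be
// set. The names, path, and region are placeholders.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.UpdateGroup(&iam.UpdateGroupInput{
		GroupName:    aws.String("old-group-name"), // original name
		NewGroupName: aws.String("new-group-name"), // include only when renaming
		NewPath:      aws.String("/division_abc/"), // include only when moving the group
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("group updated")
}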
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. NewPath *string `min:"1" type:"string"` } @@ -12565,13 +15948,26 @@ func (s UpdateGroupOutput) GoString() string { type UpdateLoginProfileInput struct { _ struct{} `type:"structure"` - // The new password for the specified user. + // The new password for the specified IAM user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). However, the format can be further + // restricted by the account administrator by setting a password policy on the + // AWS account. For more information, see UpdateAccountPasswordPolicy. Password *string `min:"1" type:"string"` - // Require the specified user to set a new password on next sign-in. + // Allows this new password to be used only once by requiring the specified + // IAM user to set a new password on next sign-in. PasswordResetRequired *bool `type:"boolean"` // The name of the user whose password you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -12621,9 +16017,13 @@ func (s UpdateLoginProfileOutput) GoString() string { type UpdateOpenIDConnectProviderThumbprintInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider - // to update the thumbprint for. You can get a list of OIDC provider ARNs by - // using the ListOpenIDConnectProviders action. + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for + // which you want to update the thumbprint. You can get a list of OIDC provider + // ARNs by using the ListOpenIDConnectProviders action. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` // A list of certificate thumbprints that are associated with the specified @@ -12685,6 +16085,10 @@ type UpdateSAMLProviderInput struct { SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` // The Amazon Resource Name (ARN) of the SAML provider to update. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. SAMLProviderArn *string `min:"20" type:"string" required:"true"` } @@ -12742,6 +16146,10 @@ type UpdateSSHPublicKeyInput struct { _ struct{} `type:"structure"` // The unique identifier for the SSH public key. 
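// A minimal sketch, for illustration, of the one-time password flow documented
// above for UpdateLoginProfile: set a new password and require the user to
// choose their own at next sign-in. The user name, password, and region are
// placeholders; the password must also satisfy the account's password policy.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.UpdateLoginProfile(&iam.UpdateLoginProfileInput{
		UserName:              aws.String("example-user"),
		Password:              aws.String("CorrectHorseBatteryStaple1!"), // placeholder
		PasswordResetRequired: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("login profile updated")
}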
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. SSHPublicKeyId *string `min:"20" type:"string" required:"true"` // The status to assign to the SSH public key. Active means the key can be used @@ -12750,6 +16158,10 @@ type UpdateSSHPublicKeyInput struct { Status *string `type:"string" required:"true" enum:"statusType"` // The name of the IAM user associated with the SSH public key. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -12807,14 +16219,28 @@ type UpdateServerCertificateInput struct { // The new path for the server certificate. Include this only if you are updating // the server certificate's path. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. NewPath *string `min:"1" type:"string"` // The new name for the server certificate. Include this only if you are updating // the server certificate's name. The name of the certificate cannot contain // any spaces. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- NewServerCertificateName *string `min:"1" type:"string"` // The name of the server certificate that you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- ServerCertificateName *string `min:"1" type:"string" required:"true"` } @@ -12868,6 +16294,10 @@ type UpdateSigningCertificateInput struct { _ struct{} `type:"structure"` // The ID of the signing certificate you want to update. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters that can consist of any upper or lowercased letter + // or digit. CertificateId *string `min:"24" type:"string" required:"true"` // The status you want to assign to the certificate. Active means the certificate @@ -12875,7 +16305,11 @@ type UpdateSigningCertificateInput struct { // be used. Status *string `type:"string" required:"true" enum:"statusType"` - // The name of the user the signing certificate belongs to. + // The name of the IAM user the signing certificate belongs to. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -12928,16 +16362,30 @@ func (s UpdateSigningCertificateOutput) GoString() string { type UpdateUserInput struct { _ struct{} `type:"structure"` - // New path for the user. 
Include this parameter only if you're changing the - // user's path. + // New path for the IAM user. Include this parameter only if you're changing + // the user's path. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. NewPath *string `min:"1" type:"string"` // New name for the user. Include this parameter only if you're changing the // user's name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- NewUserName *string `min:"1" type:"string"` // Name of the user to update. If you're changing the name of the user, this // is the original user name. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -12992,9 +16440,19 @@ type UploadSSHPublicKeyInput struct { // The SSH public key. The public key must be encoded in ssh-rsa format or PEM // format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` // The name of the IAM user to associate the SSH public key with. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string" required:"true"` } @@ -13052,30 +16510,56 @@ type UploadServerCertificateInput struct { _ struct{} `type:"structure"` // The contents of the public key certificate in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). CertificateBody *string `min:"1" type:"string" required:"true"` // The contents of the certificate chain. This is typically a concatenation // of the PEM-encoded public key certificates of the chain. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). 
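// A minimal sketch, for illustration, of associating an SSH public key with a
// user as documented above; the key body must be ssh-rsa or PEM encoded. The
// user name, key file path, and region are placeholders.
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	key, err := ioutil.ReadFile("id_rsa.pub") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	out, err := svc.UploadSSHPublicKey(&iam.UploadSSHPublicKeyInput{
		UserName:         aws.String("example-user"),
		SSHPublicKeyBody: aws.String(string(key)),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.SSHPublicKey.SSHPublicKeyId))
}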
CertificateChain *string `min:"1" type:"string"` // The path for the server certificate. For more information about paths, see // IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) - // in the Using IAM guide. + // in the IAM User Guide. // // This parameter is optional. If it is not included, it defaults to a slash - // (/). + // (/). The regex pattern (http://wikipedia.org/wiki/regex) for this parameter + // is a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes, containing any + // ASCII character from the ! (\u0021) thru the DEL character (\u007F), including + // most punctuation characters, digits, and upper and lowercased letters. // - // If you are uploading a server certificate specifically for use with Amazon + // If you are uploading a server certificate specifically for use with Amazon // CloudFront distributions, you must specify a path using the --path option. // The path must begin with /cloudfront and must include a trailing slash (for // example, /cloudfront/test/). Path *string `min:"1" type:"string"` // The contents of the private key in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). PrivateKey *string `min:"1" type:"string" required:"true"` // The name for the server certificate. Do not include the path in this value. // The name of the certificate cannot contain any spaces. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- ServerCertificateName *string `min:"1" type:"string" required:"true"` } @@ -13146,9 +16630,19 @@ type UploadSigningCertificateInput struct { _ struct{} `type:"structure"` // The contents of the signing certificate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of any printable ASCII character ranging + // from the space character (\u0020) through end of the ASCII character range + // (\u00FF). It also includes the special characters tab (\u0009), line feed + // (\u000A), and carriage return (\u000D). CertificateBody *string `min:"1" type:"string" required:"true"` // The name of the user the signing certificate is for. + // + // The regex pattern (http://wikipedia.org/wiki/regex) for this parameter is + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- UserName *string `min:"1" type:"string"` } @@ -13201,7 +16695,7 @@ func (s UploadSigningCertificateOutput) GoString() string { // Contains information about an IAM user entity. // -// This data type is used as a response element in the following actions: +// This data type is used as a response element in the following actions: // // CreateUser // @@ -13340,7 +16834,7 @@ type VirtualMFADevice struct { // Contains information about an IAM user entity. 
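// A minimal sketch, for illustration, of uploading a PEM-encoded certificate as
// described above. The file paths, certificate name, and region are
// placeholders; the CloudFront-style path mirrors the /cloudfront/ note in the
// documentation.
package main

import (
	"io/ioutil"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func mustRead(path string) string {
	b, err := ioutil.ReadFile(path)
	if err != nil {
		log.Fatal(err)
	}
	return string(b)
}

func main() {
	svc := iam.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	out, err := svc.UploadServerCertificate(&iam.UploadServerCertificateInput{
		ServerCertificateName: aws.String("example-cert"),
		CertificateBody:       aws.String(mustRead("cert.pem")), // placeholder path
		PrivateKey:            aws.String(mustRead("key.pem")),  // placeholder path
		Path:                  aws.String("/cloudfront/test/"),  // optional; defaults to /
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(aws.StringValue(out.ServerCertificateMetadata.Arn))
}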
// - // This data type is used as a response element in the following actions: + // This data type is used as a response element in the following actions: // // CreateUser // diff --git a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go index e4416226b..362916f98 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/iam/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/iam/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // AWS Identity and Access Management (IAM) is a web service that you can use @@ -17,17 +17,19 @@ import ( // information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/). // For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/). // -// AWS provides SDKs that consist of libraries and sample code for various +// AWS provides SDKs that consist of libraries and sample code for various // programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). // The SDKs provide a convenient way to create programmatic access to IAM and // AWS. For example, the SDKs take care of tasks such as cryptographically signing // requests (see below), managing errors, and retrying requests automatically. // For information about the AWS SDKs, including how to download and install // them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/) -// page. We recommend that you use the AWS SDKs to make programmatic API calls -// to IAM. However, you can also use the IAM Query API to make direct calls -// to the IAM web service. To learn more about the IAM Query API, see Making -// Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// page. +// +// We recommend that you use the AWS SDKs to make programmatic API calls to +// IAM. However, you can also use the IAM Query API to make direct calls to +// the IAM web service. To learn more about the IAM Query API, see Making Query +// Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) // in the Using IAM guide. IAM supports GET and POST requests for all actions. // That is, the API does not require you to use GET for some actions and POST // for others. However, GET requests are subject to the limitation size of a @@ -52,11 +54,15 @@ import ( // // For more information, see the following: // -// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html). +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html). // This topic provides general information about the types of credentials used -// for accessing AWS. IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html). +// for accessing AWS. +// +// IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html). // This topic presents a list of suggestions for using the IAM service to help -// secure your AWS resources. Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). +// secure your AWS resources. +// +// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html). 
// This set of topics walk you through the process of signing a request using // an access key ID and secret access key. //The service client's operations are safe to be used concurrently. @@ -105,7 +111,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go index db65419d4..aa5d71858 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/api.go @@ -15,7 +15,28 @@ import ( const opAddTagsToStream = "AddTagsToStream" -// AddTagsToStreamRequest generates a request for the AddTagsToStream operation. +// AddTagsToStreamRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToStreamRequest method. +// req, resp := client.AddTagsToStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) AddTagsToStreamRequest(input *AddTagsToStreamInput) (req *request.Request, output *AddTagsToStreamOutput) { op := &request.Operation{ Name: opAddTagsToStream, @@ -48,7 +69,28 @@ func (c *Kinesis) AddTagsToStream(input *AddTagsToStreamInput) (*AddTagsToStream const opCreateStream = "CreateStream" -// CreateStreamRequest generates a request for the CreateStream operation. +// CreateStreamRequest generates a "aws/request.Request" representing the +// client's request for the CreateStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamRequest method. 
+// req, resp := client.CreateStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) CreateStreamRequest(input *CreateStreamInput) (req *request.Request, output *CreateStreamOutput) { op := &request.Operation{ Name: opCreateStream, @@ -112,7 +154,28 @@ func (c *Kinesis) CreateStream(input *CreateStreamInput) (*CreateStreamOutput, e const opDecreaseStreamRetentionPeriod = "DecreaseStreamRetentionPeriod" -// DecreaseStreamRetentionPeriodRequest generates a request for the DecreaseStreamRetentionPeriod operation. +// DecreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the +// client's request for the DecreaseStreamRetentionPeriod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecreaseStreamRetentionPeriod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecreaseStreamRetentionPeriodRequest method. +// req, resp := client.DecreaseStreamRetentionPeriodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) DecreaseStreamRetentionPeriodRequest(input *DecreaseStreamRetentionPeriodInput) (req *request.Request, output *DecreaseStreamRetentionPeriodOutput) { op := &request.Operation{ Name: opDecreaseStreamRetentionPeriod, @@ -147,7 +210,28 @@ func (c *Kinesis) DecreaseStreamRetentionPeriod(input *DecreaseStreamRetentionPe const opDeleteStream = "DeleteStream" -// DeleteStreamRequest generates a request for the DeleteStream operation. +// DeleteStreamRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStreamRequest method. +// req, resp := client.DeleteStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) DeleteStreamRequest(input *DeleteStreamInput) (req *request.Request, output *DeleteStreamOutput) { op := &request.Operation{ Name: opDeleteStream, @@ -195,7 +279,28 @@ func (c *Kinesis) DeleteStream(input *DeleteStreamInput) (*DeleteStreamOutput, e const opDescribeStream = "DescribeStream" -// DescribeStreamRequest generates a request for the DescribeStream operation. +// DescribeStreamRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStream operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStreamRequest method. +// req, resp := client.DescribeStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) DescribeStreamRequest(input *DescribeStreamInput) (req *request.Request, output *DescribeStreamOutput) { op := &request.Operation{ Name: opDescribeStream, @@ -251,6 +356,23 @@ func (c *Kinesis) DescribeStream(input *DescribeStreamInput) (*DescribeStreamOut return out, err } +// DescribeStreamPages iterates over the pages of a DescribeStream operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeStream method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeStream operation. +// pageNum := 0 +// err := client.DescribeStreamPages(params, +// func(page *DescribeStreamOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *DescribeStreamOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeStreamRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -261,7 +383,28 @@ func (c *Kinesis) DescribeStreamPages(input *DescribeStreamInput, fn func(p *Des const opDisableEnhancedMonitoring = "DisableEnhancedMonitoring" -// DisableEnhancedMonitoringRequest generates a request for the DisableEnhancedMonitoring operation. +// DisableEnhancedMonitoringRequest generates a "aws/request.Request" representing the +// client's request for the DisableEnhancedMonitoring operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableEnhancedMonitoring method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableEnhancedMonitoringRequest method. 
+// req, resp := client.DisableEnhancedMonitoringRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) DisableEnhancedMonitoringRequest(input *DisableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) { op := &request.Operation{ Name: opDisableEnhancedMonitoring, @@ -288,7 +431,28 @@ func (c *Kinesis) DisableEnhancedMonitoring(input *DisableEnhancedMonitoringInpu const opEnableEnhancedMonitoring = "EnableEnhancedMonitoring" -// EnableEnhancedMonitoringRequest generates a request for the EnableEnhancedMonitoring operation. +// EnableEnhancedMonitoringRequest generates a "aws/request.Request" representing the +// client's request for the EnableEnhancedMonitoring operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableEnhancedMonitoring method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableEnhancedMonitoringRequest method. +// req, resp := client.EnableEnhancedMonitoringRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) EnableEnhancedMonitoringRequest(input *EnableEnhancedMonitoringInput) (req *request.Request, output *EnhancedMonitoringOutput) { op := &request.Operation{ Name: opEnableEnhancedMonitoring, @@ -315,7 +479,28 @@ func (c *Kinesis) EnableEnhancedMonitoring(input *EnableEnhancedMonitoringInput) const opGetRecords = "GetRecords" -// GetRecordsRequest generates a request for the GetRecords operation. +// GetRecordsRequest generates a "aws/request.Request" representing the +// client's request for the GetRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetRecordsRequest method. +// req, resp := client.GetRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) GetRecordsRequest(input *GetRecordsInput) (req *request.Request, output *GetRecordsOutput) { op := &request.Operation{ Name: opGetRecords, @@ -394,7 +579,28 @@ func (c *Kinesis) GetRecords(input *GetRecordsInput) (*GetRecordsOutput, error) const opGetShardIterator = "GetShardIterator" -// GetShardIteratorRequest generates a request for the GetShardIterator operation. +// GetShardIteratorRequest generates a "aws/request.Request" representing the +// client's request for the GetShardIterator operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetShardIterator method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetShardIteratorRequest method. +// req, resp := client.GetShardIteratorRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) GetShardIteratorRequest(input *GetShardIteratorInput) (req *request.Request, output *GetShardIteratorOutput) { op := &request.Operation{ Name: opGetShardIterator, @@ -458,7 +664,28 @@ func (c *Kinesis) GetShardIterator(input *GetShardIteratorInput) (*GetShardItera const opIncreaseStreamRetentionPeriod = "IncreaseStreamRetentionPeriod" -// IncreaseStreamRetentionPeriodRequest generates a request for the IncreaseStreamRetentionPeriod operation. +// IncreaseStreamRetentionPeriodRequest generates a "aws/request.Request" representing the +// client's request for the IncreaseStreamRetentionPeriod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the IncreaseStreamRetentionPeriod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the IncreaseStreamRetentionPeriodRequest method. +// req, resp := client.IncreaseStreamRetentionPeriodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) IncreaseStreamRetentionPeriodRequest(input *IncreaseStreamRetentionPeriodInput) (req *request.Request, output *IncreaseStreamRetentionPeriodOutput) { op := &request.Operation{ Name: opIncreaseStreamRetentionPeriod, @@ -497,7 +724,28 @@ func (c *Kinesis) IncreaseStreamRetentionPeriod(input *IncreaseStreamRetentionPe const opListStreams = "ListStreams" -// ListStreamsRequest generates a request for the ListStreams operation. +// ListStreamsRequest generates a "aws/request.Request" representing the +// client's request for the ListStreams operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListStreams method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListStreamsRequest method. 
+// req, resp := client.ListStreamsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) ListStreamsRequest(input *ListStreamsInput) (req *request.Request, output *ListStreamsOutput) { op := &request.Operation{ Name: opListStreams, @@ -543,6 +791,23 @@ func (c *Kinesis) ListStreams(input *ListStreamsInput) (*ListStreamsOutput, erro return out, err } +// ListStreamsPages iterates over the pages of a ListStreams operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListStreams method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListStreams operation. +// pageNum := 0 +// err := client.ListStreamsPages(params, +// func(page *ListStreamsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStreamsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListStreamsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -553,7 +818,28 @@ func (c *Kinesis) ListStreamsPages(input *ListStreamsInput, fn func(p *ListStrea const opListTagsForStream = "ListTagsForStream" -// ListTagsForStreamRequest generates a request for the ListTagsForStream operation. +// ListTagsForStreamRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForStream operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForStreamRequest method. +// req, resp := client.ListTagsForStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) ListTagsForStreamRequest(input *ListTagsForStreamInput) (req *request.Request, output *ListTagsForStreamOutput) { op := &request.Operation{ Name: opListTagsForStream, @@ -580,7 +866,28 @@ func (c *Kinesis) ListTagsForStream(input *ListTagsForStreamInput) (*ListTagsFor const opMergeShards = "MergeShards" -// MergeShardsRequest generates a request for the MergeShards operation. +// MergeShardsRequest generates a "aws/request.Request" representing the +// client's request for the MergeShards operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the MergeShards method directly +// instead. 
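The regenerated doc comments above all describe the same pattern: build a request object so that custom logic can be attached to its lifecycle, then call Send yourself. A minimal illustrative sketch of that pattern (not part of the vendored code), using ListStreams with a made-up logging handler; the region and Limit values are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	client := kinesis.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Build the request without sending it; resp is populated only after Send.
	req, resp := client.ListStreamsRequest(&kinesis.ListStreamsInput{
		Limit: aws.Int64(10),
	})

	// Inject custom logic into the request lifecycle before it executes.
	req.Handlers.Send.PushBack(func(r *request.Request) {
		log.Printf("sending %s to %s", r.Operation.Name, r.ClientInfo.ServiceName)
	})

	// Nothing goes over the wire until Send is called on the request object.
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValueSlice(resp.StreamNames))
}
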
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the MergeShardsRequest method. +// req, resp := client.MergeShardsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) MergeShardsRequest(input *MergeShardsInput) (req *request.Request, output *MergeShardsOutput) { op := &request.Operation{ Name: opMergeShards, @@ -644,7 +951,28 @@ func (c *Kinesis) MergeShards(input *MergeShardsInput) (*MergeShardsOutput, erro const opPutRecord = "PutRecord" -// PutRecordRequest generates a request for the PutRecord operation. +// PutRecordRequest generates a "aws/request.Request" representing the +// client's request for the PutRecord operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecord method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordRequest method. +// req, resp := client.PutRecordRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) PutRecordRequest(input *PutRecordInput) (req *request.Request, output *PutRecordOutput) { op := &request.Operation{ Name: opPutRecord, @@ -708,7 +1036,28 @@ func (c *Kinesis) PutRecord(input *PutRecordInput) (*PutRecordOutput, error) { const opPutRecords = "PutRecords" -// PutRecordsRequest generates a request for the PutRecords operation. +// PutRecordsRequest generates a "aws/request.Request" representing the +// client's request for the PutRecords operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutRecords method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutRecordsRequest method. +// req, resp := client.PutRecordsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) PutRecordsRequest(input *PutRecordsInput) (req *request.Request, output *PutRecordsOutput) { op := &request.Operation{ Name: opPutRecords, @@ -794,7 +1143,28 @@ func (c *Kinesis) PutRecords(input *PutRecordsInput) (*PutRecordsOutput, error) const opRemoveTagsFromStream = "RemoveTagsFromStream" -// RemoveTagsFromStreamRequest generates a request for the RemoveTagsFromStream operation. +// RemoveTagsFromStreamRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromStream operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromStream method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromStreamRequest method. +// req, resp := client.RemoveTagsFromStreamRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) RemoveTagsFromStreamRequest(input *RemoveTagsFromStreamInput) (req *request.Request, output *RemoveTagsFromStreamOutput) { op := &request.Operation{ Name: opRemoveTagsFromStream, @@ -826,7 +1196,28 @@ func (c *Kinesis) RemoveTagsFromStream(input *RemoveTagsFromStreamInput) (*Remov const opSplitShard = "SplitShard" -// SplitShardRequest generates a request for the SplitShard operation. +// SplitShardRequest generates a "aws/request.Request" representing the +// client's request for the SplitShard operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SplitShard method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SplitShardRequest method. 
+// req, resp := client.SplitShardRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Kinesis) SplitShardRequest(input *SplitShardInput) (req *request.Request, output *SplitShardOutput) { op := &request.Operation{ Name: opSplitShard, diff --git a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go index 687b7466b..e2fe21e77 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kinesis/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Kinesis Streams is a managed service that scales elastically for real @@ -61,7 +61,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 3caa3a83f..18055b42d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -14,7 +14,28 @@ import ( const opCancelKeyDeletion = "CancelKeyDeletion" -// CancelKeyDeletionRequest generates a request for the CancelKeyDeletion operation. +// CancelKeyDeletionRequest generates a "aws/request.Request" representing the +// client's request for the CancelKeyDeletion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CancelKeyDeletion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CancelKeyDeletionRequest method. +// req, resp := client.CancelKeyDeletionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *request.Request, output *CancelKeyDeletionOutput) { op := &request.Operation{ Name: opCancelKeyDeletion, @@ -36,8 +57,8 @@ func (c *KMS) CancelKeyDeletionRequest(input *CancelKeyDeletionInput) (req *requ // is successful, the CMK is set to the Disabled state. To enable a CMK, use // EnableKey. 
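The service.go hunks above replace Handlers.Sign.PushBack(v4.Sign) with Handlers.Sign.PushBackNamed(v4.SignRequestHandler), registering the SigV4 signer as a named handler instead of an anonymous function. A small sketch, for illustration only, of what a named handler allows a caller to do; the "example.NoOpSign" handler is hypothetical:

package main

import (
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/aws/signer/v4"
	"github.com/aws/aws-sdk-go/service/kinesis"
)

func main() {
	svc := kinesis.New(session.New())

	// Because the signer is registered by name, it can be located and removed,
	// for example on a client pointed at an unauthenticated local endpoint.
	svc.Handlers.Sign.Remove(v4.SignRequestHandler)

	// Callers can likewise register their own named handler in its place.
	svc.Handlers.Sign.PushBackNamed(request.NamedHandler{
		Name: "example.NoOpSign", // hypothetical handler name
		Fn:   func(r *request.Request) { /* leave the request unsigned */ },
	})
}
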
// -// For more information about scheduling and canceling deletion of a CMK, go -// to Deleting Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) +// For more information about scheduling and canceling deletion of a CMK, see +// Deleting Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) // in the AWS Key Management Service Developer Guide. func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeletionOutput, error) { req, out := c.CancelKeyDeletionRequest(input) @@ -47,7 +68,28 @@ func (c *KMS) CancelKeyDeletion(input *CancelKeyDeletionInput) (*CancelKeyDeleti const opCreateAlias = "CreateAlias" -// CreateAliasRequest generates a request for the CreateAlias operation. +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *CreateAliasOutput) { op := &request.Operation{ Name: opCreateAlias, @@ -87,7 +129,28 @@ func (c *KMS) CreateAlias(input *CreateAliasInput) (*CreateAliasOutput, error) { const opCreateGrant = "CreateGrant" -// CreateGrantRequest generates a request for the CreateGrant operation. +// CreateGrantRequest generates a "aws/request.Request" representing the +// client's request for the CreateGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateGrantRequest method. +// req, resp := client.CreateGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) CreateGrantRequest(input *CreateGrantInput) (req *request.Request, output *CreateGrantOutput) { op := &request.Operation{ Name: opCreateGrant, @@ -118,7 +181,28 @@ func (c *KMS) CreateGrant(input *CreateGrantInput) (*CreateGrantOutput, error) { const opCreateKey = "CreateKey" -// CreateKeyRequest generates a request for the CreateKey operation. +// CreateKeyRequest generates a "aws/request.Request" representing the +// client's request for the CreateKey operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateKeyRequest method. +// req, resp := client.CreateKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, output *CreateKeyOutput) { op := &request.Operation{ Name: opCreateKey, @@ -136,10 +220,17 @@ func (c *KMS) CreateKeyRequest(input *CreateKeyInput) (req *request.Request, out return } -// Creates a customer master key. Customer master keys can be used to encrypt -// small amounts of data (less than 4K) directly, but they are most commonly -// used to encrypt or envelope data keys that are then used to encrypt customer -// data. For more information about data keys, see GenerateDataKey and GenerateDataKeyWithoutPlaintext. +// Creates a customer master key (CMK). +// +// You can use a CMK to encrypt small amounts of data (4 KiB or less) directly, +// but CMKs are more commonly used to encrypt data encryption keys (DEKs), which +// are used to encrypt raw data. For more information about DEKs and the difference +// between CMKs and DEKs, see the following: +// +// The GenerateDataKey operation +// +// AWS Key Management Service Concepts (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html) +// in the AWS Key Management Service Developer Guide func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { req, out := c.CreateKeyRequest(input) err := req.Send() @@ -148,7 +239,28 @@ func (c *KMS) CreateKey(input *CreateKeyInput) (*CreateKeyOutput, error) { const opDecrypt = "Decrypt" -// DecryptRequest generates a request for the Decrypt operation. +// DecryptRequest generates a "aws/request.Request" representing the +// client's request for the Decrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Decrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecryptRequest method. +// req, resp := client.DecryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output *DecryptOutput) { op := &request.Operation{ Name: opDecrypt, @@ -167,17 +279,22 @@ func (c *KMS) DecryptRequest(input *DecryptInput) (req *request.Request, output } // Decrypts ciphertext. 
Ciphertext is plaintext that has been previously encrypted -// by using any of the following functions: GenerateDataKey GenerateDataKeyWithoutPlaintext -// Encrypt +// by using any of the following functions: // -// Note that if a caller has been granted access permissions to all keys (through, -// for example, IAM user policies that grant Decrypt permission on all resources), -// then ciphertext encrypted by using keys in other accounts where the key grants -// access to the caller can be decrypted. To remedy this, we recommend that -// you do not grant Decrypt access in an IAM user policy. Instead grant Decrypt -// access only in key policies. If you must grant Decrypt access in an IAM user -// policy, you should scope the resource to specific keys or to specific trusted -// accounts. +// GenerateDataKey +// +// GenerateDataKeyWithoutPlaintext +// +// Encrypt +// +// Note that if a caller has been granted access permissions to all keys +// (through, for example, IAM user policies that grant Decrypt permission on +// all resources), then ciphertext encrypted by using keys in other accounts +// where the key grants access to the caller can be decrypted. To remedy this, +// we recommend that you do not grant Decrypt access in an IAM user policy. +// Instead grant Decrypt access only in key policies. If you must grant Decrypt +// access in an IAM user policy, you should scope the resource to specific keys +// or to specific trusted accounts. func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { req, out := c.DecryptRequest(input) err := req.Send() @@ -186,7 +303,28 @@ func (c *KMS) Decrypt(input *DecryptInput) (*DecryptOutput, error) { const opDeleteAlias = "DeleteAlias" -// DeleteAliasRequest generates a request for the DeleteAlias operation. +// DeleteAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAliasRequest method. +// req, resp := client.DeleteAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { op := &request.Operation{ Name: opDeleteAlias, @@ -215,7 +353,28 @@ func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { const opDescribeKey = "DescribeKey" -// DescribeKeyRequest generates a request for the DescribeKey operation. +// DescribeKeyRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeKeyRequest method. +// req, resp := client.DescribeKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) DescribeKeyRequest(input *DescribeKeyInput) (req *request.Request, output *DescribeKeyOutput) { op := &request.Operation{ Name: opDescribeKey, @@ -242,7 +401,28 @@ func (c *KMS) DescribeKey(input *DescribeKeyInput) (*DescribeKeyOutput, error) { const opDisableKey = "DisableKey" -// DisableKeyRequest generates a request for the DisableKey operation. +// DisableKeyRequest generates a "aws/request.Request" representing the +// client's request for the DisableKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableKeyRequest method. +// req, resp := client.DisableKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, output *DisableKeyOutput) { op := &request.Operation{ Name: opDisableKey, @@ -262,9 +442,9 @@ func (c *KMS) DisableKeyRequest(input *DisableKeyInput) (req *request.Request, o return } -// Sets the state of a master key to disabled, thereby preventing its use for -// cryptographic operations. For more information about how key state affects -// the use of a master key, go to How Key State Affects the Use of a Customer +// Sets the state of a customer master key (CMK) to disabled, thereby preventing +// its use for cryptographic operations. For more information about how key +// state affects the use of a CMK, see How Key State Affects the Use of a Customer // Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { @@ -275,7 +455,28 @@ func (c *KMS) DisableKey(input *DisableKeyInput) (*DisableKeyOutput, error) { const opDisableKeyRotation = "DisableKeyRotation" -// DisableKeyRotationRequest generates a request for the DisableKeyRotation operation. +// DisableKeyRotationRequest generates a "aws/request.Request" representing the +// client's request for the DisableKeyRotation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableKeyRotation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableKeyRotationRequest method. +// req, resp := client.DisableKeyRotationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) DisableKeyRotationRequest(input *DisableKeyRotationInput) (req *request.Request, output *DisableKeyRotationOutput) { op := &request.Operation{ Name: opDisableKeyRotation, @@ -304,7 +505,28 @@ func (c *KMS) DisableKeyRotation(input *DisableKeyRotationInput) (*DisableKeyRot const opEnableKey = "EnableKey" -// EnableKeyRequest generates a request for the EnableKey operation. +// EnableKeyRequest generates a "aws/request.Request" representing the +// client's request for the EnableKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableKeyRequest method. +// req, resp := client.EnableKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) EnableKeyRequest(input *EnableKeyInput) (req *request.Request, output *EnableKeyOutput) { op := &request.Operation{ Name: opEnableKey, @@ -333,7 +555,28 @@ func (c *KMS) EnableKey(input *EnableKeyInput) (*EnableKeyOutput, error) { const opEnableKeyRotation = "EnableKeyRotation" -// EnableKeyRotationRequest generates a request for the EnableKeyRotation operation. +// EnableKeyRotationRequest generates a "aws/request.Request" representing the +// client's request for the EnableKeyRotation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableKeyRotation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableKeyRotationRequest method. 
+// req, resp := client.EnableKeyRotationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) EnableKeyRotationRequest(input *EnableKeyRotationInput) (req *request.Request, output *EnableKeyRotationOutput) { op := &request.Operation{ Name: opEnableKeyRotation, @@ -362,7 +605,28 @@ func (c *KMS) EnableKeyRotation(input *EnableKeyRotationInput) (*EnableKeyRotati const opEncrypt = "Encrypt" -// EncryptRequest generates a request for the Encrypt operation. +// EncryptRequest generates a "aws/request.Request" representing the +// client's request for the Encrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Encrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EncryptRequest method. +// req, resp := client.EncryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output *EncryptOutput) { op := &request.Operation{ Name: opEncrypt, @@ -381,15 +645,18 @@ func (c *KMS) EncryptRequest(input *EncryptInput) (req *request.Request, output } // Encrypts plaintext into ciphertext by using a customer master key. The Encrypt -// function has two primary use cases: You can encrypt up to 4 KB of arbitrary -// data such as an RSA key, a database password, or other sensitive customer -// information. If you are moving encrypted data from one region to another, -// you can use this API to encrypt in the new region the plaintext data key -// that was used to encrypt the data in the original region. This provides you -// with an encrypted copy of the data key that can be decrypted in the new region -// and used there to decrypt the encrypted data. +// function has two primary use cases: // -// Unless you are moving encrypted data from one region to another, you don't +// You can encrypt up to 4 KB of arbitrary data such as an RSA key, a database +// password, or other sensitive customer information. +// +// If you are moving encrypted data from one region to another, you can use +// this API to encrypt in the new region the plaintext data key that was used +// to encrypt the data in the original region. This provides you with an encrypted +// copy of the data key that can be decrypted in the new region and used there +// to decrypt the encrypted data. +// +// Unless you are moving encrypted data from one region to another, you don't // use this function to encrypt a generated data key within a region. You retrieve // data keys already encrypted by calling the GenerateDataKey or GenerateDataKeyWithoutPlaintext // function. Data keys don't need to be encrypted again by calling Encrypt. @@ -405,7 +672,28 @@ func (c *KMS) Encrypt(input *EncryptInput) (*EncryptOutput, error) { const opGenerateDataKey = "GenerateDataKey" -// GenerateDataKeyRequest generates a request for the GenerateDataKey operation. 
+// GenerateDataKeyRequest generates a "aws/request.Request" representing the +// client's request for the GenerateDataKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateDataKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateDataKeyRequest method. +// req, resp := client.GenerateDataKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request.Request, output *GenerateDataKeyOutput) { op := &request.Operation{ Name: opGenerateDataKey, @@ -435,16 +723,18 @@ func (c *KMS) GenerateDataKeyRequest(input *GenerateDataKeyInput) (req *request. // memory. Store the encrypted data key (contained in the CiphertextBlob field) // alongside of the locally encrypted data. // -// You should not call the Encrypt function to re-encrypt your data keys within +// You should not call the Encrypt function to re-encrypt your data keys within // a region. GenerateDataKey always returns the data key encrypted and tied // to the customer master key that will be used to decrypt it. There is no need -// to decrypt it twice. If you decide to use the optional EncryptionContext -// parameter, you must also store the context in full or at least store enough -// information along with the encrypted data to be able to reconstruct the context -// when submitting the ciphertext to the Decrypt API. It is a good practice -// to choose a context that you can reconstruct on the fly to better secure -// the ciphertext. For more information about how this parameter is used, see -// Encryption Context (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). +// to decrypt it twice. +// +// If you decide to use the optional EncryptionContext parameter, you must +// also store the context in full or at least store enough information along +// with the encrypted data to be able to reconstruct the context when submitting +// the ciphertext to the Decrypt API. It is a good practice to choose a context +// that you can reconstruct on the fly to better secure the ciphertext. For +// more information about how this parameter is used, see Encryption Context +// (http://docs.aws.amazon.com/kms/latest/developerguide/encrypt-context.html). // // To decrypt data, pass the encrypted data key to the Decrypt API. Decrypt // uses the associated master key to decrypt the encrypted data key and returns @@ -461,7 +751,28 @@ func (c *KMS) GenerateDataKey(input *GenerateDataKeyInput) (*GenerateDataKeyOutp const opGenerateDataKeyWithoutPlaintext = "GenerateDataKeyWithoutPlaintext" -// GenerateDataKeyWithoutPlaintextRequest generates a request for the GenerateDataKeyWithoutPlaintext operation. +// GenerateDataKeyWithoutPlaintextRequest generates a "aws/request.Request" representing the +// client's request for the GenerateDataKeyWithoutPlaintext operation. 
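The GenerateDataKey documentation above describes the envelope-encryption flow: generate a data key under a CMK, encrypt locally with the plaintext key, keep only the CiphertextBlob next to the encrypted data, and call Decrypt later to recover the key. A rough sketch of that flow under assumed placeholder values (key alias, region, payload), using AES-GCM for the local step; error handling is abbreviated:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/kms"
)

func main() {
	svc := kms.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// 1. Ask KMS for a data key under a CMK (the alias is a placeholder).
	dk, err := svc.GenerateDataKey(&kms.GenerateDataKeyInput{
		KeyId:   aws.String("alias/example"),
		KeySpec: aws.String("AES_256"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// 2. Encrypt locally with the plaintext key, then discard it; only the
	//    CiphertextBlob needs to be stored alongside the encrypted data.
	block, _ := aes.NewCipher(dk.Plaintext)
	gcm, _ := cipher.NewGCM(block)
	nonce := make([]byte, gcm.NonceSize())
	rand.Read(nonce)
	sealed := gcm.Seal(nonce, nonce, []byte("example payload"), nil)

	// 3. Later, recover the plaintext data key via Decrypt and unseal.
	out, err := svc.Decrypt(&kms.DecryptInput{CiphertextBlob: dk.CiphertextBlob})
	if err != nil {
		log.Fatal(err)
	}
	block, _ = aes.NewCipher(out.Plaintext)
	gcm, _ = cipher.NewGCM(block)
	plain, _ := gcm.Open(nil, sealed[:gcm.NonceSize()], sealed[gcm.NonceSize():], nil)
	fmt.Println(string(plain))
}
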
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateDataKeyWithoutPlaintext method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateDataKeyWithoutPlaintextRequest method. +// req, resp := client.GenerateDataKeyWithoutPlaintextRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) GenerateDataKeyWithoutPlaintextRequest(input *GenerateDataKeyWithoutPlaintextInput) (req *request.Request, output *GenerateDataKeyWithoutPlaintextOutput) { op := &request.Operation{ Name: opGenerateDataKeyWithoutPlaintext, @@ -492,7 +803,28 @@ func (c *KMS) GenerateDataKeyWithoutPlaintext(input *GenerateDataKeyWithoutPlain const opGenerateRandom = "GenerateRandom" -// GenerateRandomRequest generates a request for the GenerateRandom operation. +// GenerateRandomRequest generates a "aws/request.Request" representing the +// client's request for the GenerateRandom operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GenerateRandom method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GenerateRandomRequest method. +// req, resp := client.GenerateRandomRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) GenerateRandomRequest(input *GenerateRandomInput) (req *request.Request, output *GenerateRandomOutput) { op := &request.Operation{ Name: opGenerateRandom, @@ -519,7 +851,28 @@ func (c *KMS) GenerateRandom(input *GenerateRandomInput) (*GenerateRandomOutput, const opGetKeyPolicy = "GetKeyPolicy" -// GetKeyPolicyRequest generates a request for the GetKeyPolicy operation. +// GetKeyPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetKeyPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetKeyPolicyRequest method. 
+// req, resp := client.GetKeyPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) GetKeyPolicyRequest(input *GetKeyPolicyInput) (req *request.Request, output *GetKeyPolicyOutput) { op := &request.Operation{ Name: opGetKeyPolicy, @@ -546,7 +899,28 @@ func (c *KMS) GetKeyPolicy(input *GetKeyPolicyInput) (*GetKeyPolicyOutput, error const opGetKeyRotationStatus = "GetKeyRotationStatus" -// GetKeyRotationStatusRequest generates a request for the GetKeyRotationStatus operation. +// GetKeyRotationStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetKeyRotationStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetKeyRotationStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetKeyRotationStatusRequest method. +// req, resp := client.GetKeyRotationStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) GetKeyRotationStatusRequest(input *GetKeyRotationStatusInput) (req *request.Request, output *GetKeyRotationStatusOutput) { op := &request.Operation{ Name: opGetKeyRotationStatus, @@ -574,7 +948,28 @@ func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRot const opListAliases = "ListAliases" -// ListAliasesRequest generates a request for the ListAliases operation. +// ListAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAliases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAliasesRequest method. +// req, resp := client.ListAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { op := &request.Operation{ Name: opListAliases, @@ -605,6 +1000,23 @@ func (c *KMS) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error) { return out, err } +// ListAliasesPages iterates over the pages of a ListAliases operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListAliases method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a ListAliases operation. +// pageNum := 0 +// err := client.ListAliasesPages(params, +// func(page *ListAliasesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(p *ListAliasesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListAliasesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -615,7 +1027,28 @@ func (c *KMS) ListAliasesPages(input *ListAliasesInput, fn func(p *ListAliasesOu const opListGrants = "ListGrants" -// ListGrantsRequest generates a request for the ListGrants operation. +// ListGrantsRequest generates a "aws/request.Request" representing the +// client's request for the ListGrants operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGrants method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGrantsRequest method. +// req, resp := client.ListGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ListGrantsRequest(input *ListGrantsInput) (req *request.Request, output *ListGrantsResponse) { op := &request.Operation{ Name: opListGrants, @@ -646,6 +1079,23 @@ func (c *KMS) ListGrants(input *ListGrantsInput) (*ListGrantsResponse, error) { return out, err } +// ListGrantsPages iterates over the pages of a ListGrants operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListGrants method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListGrants operation. +// pageNum := 0 +// err := client.ListGrantsPages(params, +// func(page *ListGrantsResponse, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(p *ListGrantsResponse, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListGrantsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -656,7 +1106,28 @@ func (c *KMS) ListGrantsPages(input *ListGrantsInput, fn func(p *ListGrantsRespo const opListKeyPolicies = "ListKeyPolicies" -// ListKeyPoliciesRequest generates a request for the ListKeyPolicies operation. +// ListKeyPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListKeyPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListKeyPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListKeyPoliciesRequest method. +// req, resp := client.ListKeyPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ListKeyPoliciesRequest(input *ListKeyPoliciesInput) (req *request.Request, output *ListKeyPoliciesOutput) { op := &request.Operation{ Name: opListKeyPolicies, @@ -687,6 +1158,23 @@ func (c *KMS) ListKeyPolicies(input *ListKeyPoliciesInput) (*ListKeyPoliciesOutp return out, err } +// ListKeyPoliciesPages iterates over the pages of a ListKeyPolicies operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListKeyPolicies method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListKeyPolicies operation. +// pageNum := 0 +// err := client.ListKeyPoliciesPages(params, +// func(page *ListKeyPoliciesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(p *ListKeyPoliciesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListKeyPoliciesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -697,7 +1185,28 @@ func (c *KMS) ListKeyPoliciesPages(input *ListKeyPoliciesInput, fn func(p *ListK const opListKeys = "ListKeys" -// ListKeysRequest generates a request for the ListKeys operation. +// ListKeysRequest generates a "aws/request.Request" representing the +// client's request for the ListKeys operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListKeys method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListKeysRequest method. +// req, resp := client.ListKeysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ListKeysRequest(input *ListKeysInput) (req *request.Request, output *ListKeysOutput) { op := &request.Operation{ Name: opListKeys, @@ -728,6 +1237,23 @@ func (c *KMS) ListKeys(input *ListKeysInput) (*ListKeysOutput, error) { return out, err } +// ListKeysPages iterates over the pages of a ListKeys operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListKeys method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListKeys operation. 
+// pageNum := 0 +// err := client.ListKeysPages(params, +// func(page *ListKeysOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(p *ListKeysOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListKeysRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -738,7 +1264,28 @@ func (c *KMS) ListKeysPages(input *ListKeysInput, fn func(p *ListKeysOutput, las const opListRetirableGrants = "ListRetirableGrants" -// ListRetirableGrantsRequest generates a request for the ListRetirableGrants operation. +// ListRetirableGrantsRequest generates a "aws/request.Request" representing the +// client's request for the ListRetirableGrants operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListRetirableGrants method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListRetirableGrantsRequest method. +// req, resp := client.ListRetirableGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ListRetirableGrantsRequest(input *ListRetirableGrantsInput) (req *request.Request, output *ListGrantsResponse) { op := &request.Operation{ Name: opListRetirableGrants, @@ -769,7 +1316,28 @@ func (c *KMS) ListRetirableGrants(input *ListRetirableGrantsInput) (*ListGrantsR const opPutKeyPolicy = "PutKeyPolicy" -// PutKeyPolicyRequest generates a request for the PutKeyPolicy operation. +// PutKeyPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutKeyPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutKeyPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutKeyPolicyRequest method. +// req, resp := client.PutKeyPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Request, output *PutKeyPolicyOutput) { op := &request.Operation{ Name: opPutKeyPolicy, @@ -789,7 +1357,10 @@ func (c *KMS) PutKeyPolicyRequest(input *PutKeyPolicyInput) (req *request.Reques return } -// Attaches a policy to the specified key. +// Attaches a key policy to the specified customer master key (CMK). 
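The *Pages helpers documented above run the paginator and hand each page to a callback; returning false from the callback stops the iteration early. A short sketch of collecting key IDs with ListKeysPages follows, assuming svc is a *kms.KMS client built elsewhere and that the aws, kms, and log packages are imported; none of this is part of the patch itself.

	// Collect the ID of every key visible to the caller, page by page.
	var keyIDs []string
	err := svc.ListKeysPages(&kms.ListKeysInput{},
		func(page *kms.ListKeysOutput, lastPage bool) bool {
			for _, entry := range page.Keys {
				keyIDs = append(keyIDs, aws.StringValue(entry.KeyId))
			}
			return true // keep going; the paginator stops after the last page
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("found %d keys", len(keyIDs))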
+// +// For more information about key policies, see Key Policies (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html) +// in the AWS Key Management Service Developer Guide. func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error) { req, out := c.PutKeyPolicyRequest(input) err := req.Send() @@ -798,7 +1369,28 @@ func (c *KMS) PutKeyPolicy(input *PutKeyPolicyInput) (*PutKeyPolicyOutput, error const opReEncrypt = "ReEncrypt" -// ReEncryptRequest generates a request for the ReEncrypt operation. +// ReEncryptRequest generates a "aws/request.Request" representing the +// client's request for the ReEncrypt operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReEncrypt method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReEncryptRequest method. +// req, resp := client.ReEncryptRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ReEncryptRequest(input *ReEncryptInput) (req *request.Request, output *ReEncryptOutput) { op := &request.Operation{ Name: opReEncrypt, @@ -836,7 +1428,28 @@ func (c *KMS) ReEncrypt(input *ReEncryptInput) (*ReEncryptOutput, error) { const opRetireGrant = "RetireGrant" -// RetireGrantRequest generates a request for the RetireGrant operation. +// RetireGrantRequest generates a "aws/request.Request" representing the +// client's request for the RetireGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RetireGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RetireGrantRequest method. +// req, resp := client.RetireGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, output *RetireGrantOutput) { op := &request.Operation{ Name: opRetireGrant, @@ -858,13 +1471,18 @@ func (c *KMS) RetireGrantRequest(input *RetireGrantInput) (req *request.Request, // Retires a grant. You can retire a grant when you're done using it to clean // up. You should revoke a grant when you intend to actively deny operations -// that depend on it. The following are permitted to call this API: The account -// that created the grant The RetiringPrincipal, if present The GranteePrincipal, -// if RetireGrant is a grantee operation The grant to retire must be identified -// by its grant token or by a combination of the key ARN and the grant ID. 
A -// grant token is a unique variable-length base64-encoded string. A grant ID -// is a 64 character unique identifier of a grant. Both are returned by the -// CreateGrant function. +// that depend on it. The following are permitted to call this API: +// +// The account that created the grant +// +// The RetiringPrincipal, if present +// +// The GranteePrincipal, if RetireGrant is a grantee operation +// +// The grant to retire must be identified by its grant token or by a combination +// of the key ARN and the grant ID. A grant token is a unique variable-length +// base64-encoded string. A grant ID is a 64 character unique identifier of +// a grant. Both are returned by the CreateGrant function. func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) { req, out := c.RetireGrantRequest(input) err := req.Send() @@ -873,7 +1491,28 @@ func (c *KMS) RetireGrant(input *RetireGrantInput) (*RetireGrantOutput, error) { const opRevokeGrant = "RevokeGrant" -// RevokeGrantRequest generates a request for the RevokeGrant operation. +// RevokeGrantRequest generates a "aws/request.Request" representing the +// client's request for the RevokeGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeGrantRequest method. +// req, resp := client.RevokeGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) RevokeGrantRequest(input *RevokeGrantInput) (req *request.Request, output *RevokeGrantOutput) { op := &request.Operation{ Name: opRevokeGrant, @@ -903,7 +1542,28 @@ func (c *KMS) RevokeGrant(input *RevokeGrantInput) (*RevokeGrantOutput, error) { const opScheduleKeyDeletion = "ScheduleKeyDeletion" -// ScheduleKeyDeletionRequest generates a request for the ScheduleKeyDeletion operation. +// ScheduleKeyDeletionRequest generates a "aws/request.Request" representing the +// client's request for the ScheduleKeyDeletion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ScheduleKeyDeletion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ScheduleKeyDeletionRequest method. 
+// req, resp := client.ScheduleKeyDeletionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req *request.Request, output *ScheduleKeyDeletionOutput) { op := &request.Operation{ Name: opScheduleKeyDeletion, @@ -934,7 +1594,7 @@ func (c *KMS) ScheduleKeyDeletionRequest(input *ScheduleKeyDeletionInput) (req * // a CMK is deleted, all data that was encrypted under the CMK is rendered unrecoverable. // To restrict the use of a CMK without deleting it, use DisableKey. // -// For more information about scheduling a CMK for deletion, go to Deleting +// For more information about scheduling a CMK for deletion, see Deleting // Customer Master Keys (http://docs.aws.amazon.com/kms/latest/developerguide/deleting-keys.html) // in the AWS Key Management Service Developer Guide. func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKeyDeletionOutput, error) { @@ -945,7 +1605,28 @@ func (c *KMS) ScheduleKeyDeletion(input *ScheduleKeyDeletionInput) (*ScheduleKey const opUpdateAlias = "UpdateAlias" -// UpdateAliasRequest generates a request for the UpdateAlias operation. +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAliasRequest method. +// req, resp := client.UpdateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *UpdateAliasOutput) { op := &request.Operation{ Name: opUpdateAlias, @@ -986,7 +1667,28 @@ func (c *KMS) UpdateAlias(input *UpdateAliasInput) (*UpdateAliasOutput, error) { const opUpdateKeyDescription = "UpdateKeyDescription" -// UpdateKeyDescriptionRequest generates a request for the UpdateKeyDescription operation. +// UpdateKeyDescriptionRequest generates a "aws/request.Request" representing the +// client's request for the UpdateKeyDescription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateKeyDescription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateKeyDescriptionRequest method. 
+// req, resp := client.UpdateKeyDescriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *KMS) UpdateKeyDescriptionRequest(input *UpdateKeyDescriptionInput) (req *request.Request, output *UpdateKeyDescriptionOutput) { op := &request.Operation{ Name: opUpdateKeyDescription, @@ -1044,12 +1746,14 @@ type CancelKeyDeletionInput struct { // deletion. // // To specify this value, use the unique key ID or the Amazon Resource Name - // (ARN) of the CMK. Examples: Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // (ARN) of the CMK. Examples: // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // To obtain the unique key ID and key ARN for a given CMK, use ListKeys or - // DescribeKey. + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys + // or DescribeKey. KeyId *string `min:"1" type:"string" required:"true"` } @@ -1106,8 +1810,11 @@ type CreateAliasInput struct { // An identifier of the key for which you are creating the alias. This value // cannot be another alias but can be a globally unique identifier or a fully - // specified ARN to a key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 TargetKeyId *string `min:"1" type:"string" required:"true"` } @@ -1170,7 +1877,7 @@ type CreateGrantInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` @@ -1189,8 +1896,11 @@ type CreateGrantInput struct { // to. // // To specify this value, use the globally unique key ID or the Amazon Resource - // Name (ARN) of the key. Examples: Globally unique key ID: 12345678-1234-1234-1234-123456789012 - // Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012 + // Name (ARN) of the key. Examples: + // + // Globally unique key ID: 12345678-1234-1234-1234-123456789012 + // + // Key ARN: arn:aws:kms:us-west-2:123456789012:key/12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` // A friendly name for identifying the grant. Use this value to prevent unintended @@ -1208,8 +1918,25 @@ type CreateGrantInput struct { Name *string `min:"1" type:"string"` // A list of operations that the grant permits. 
The list can contain any combination - // of one or more of the following values: Decrypt Encrypt GenerateDataKey - // GenerateDataKeyWithoutPlaintext ReEncryptFrom ReEncryptTo CreateGrant RetireGrant + // of one or more of the following values: + // + // Decrypt + // + // Encrypt + // + // GenerateDataKey + // + // GenerateDataKeyWithoutPlaintext + // + // ReEncryptFrom (http://docs.aws.amazon.com/kms/latest/APIReference/API_ReEncrypt.html) + // + // ReEncryptTo (http://docs.aws.amazon.com/kms/latest/APIReference/API_ReEncrypt.html) + // + // CreateGrant + // + // RetireGrant + // + // DescribeKey Operations []*string `type:"list"` // The principal that is given permission to retire the grant by using RetireGrant @@ -1272,7 +1999,7 @@ type CreateGrantOutput struct { // The grant token. // - // For more information about using grant tokens, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantToken *string `min:"1" type:"string"` } @@ -1290,16 +2017,55 @@ func (s CreateGrantOutput) GoString() string { type CreateKeyInput struct { _ struct{} `type:"structure"` - // Description of the key. We recommend that you choose a description that helps - // your customer decide whether the key is appropriate for a task. + // A flag to indicate whether to bypass the key policy lockout safety check. + // + // Setting this value to true increases the likelihood that the CMK becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // Use this parameter only when you include a policy in the request and you + // intend to prevent the principal making the request from making a subsequent + // PutKeyPolicy request on the CMK. + // + // The default value is false. + BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` + + // A description of the CMK. + // + // Use a description that helps you decide whether the CMK is appropriate for + // a task. Description *string `type:"string"` - // Specifies the intended use of the key. Currently this defaults to ENCRYPT/DECRYPT, - // and only symmetric encryption and decryption are supported. + // The intended use of the CMK. + // + // You can use CMKs only for symmetric encryption and decryption. KeyUsage *string `type:"string" enum:"KeyUsageType"` - // Policy to attach to the key. This is required and delegates back to the account. - // The key is the root of trust. The policy size limit is 32 KiB (32768 bytes). + // The key policy to attach to the CMK. + // + // If you specify a key policy, it must meet the following criteria: + // + // It must allow the principal making the CreateKey request to make a subsequent + // PutKeyPolicy request on the CMK. This reduces the likelihood that the CMK + // becomes unmanageable. For more information, refer to the scenario in the + // Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // The principal(s) specified in the key policy must exist and be visible + // to AWS KMS. 
When you create a new AWS principal (for example, an IAM user + // or role), you might need to enforce a delay before specifying the new principal + // in a key policy because the new principal might not immediately be visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the IAM User Guide. + // + // If you do not specify a policy, AWS KMS attaches a default key policy + // to the CMK. For more information, see Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default) + // in the AWS Key Management Service Developer Guide. + // + // The policy size limit is 32 KiB (32768 bytes). Policy *string `min:"1" type:"string"` } @@ -1329,7 +2095,7 @@ func (s *CreateKeyInput) Validate() error { type CreateKeyOutput struct { _ struct{} `type:"structure"` - // Metadata associated with the key. + // Metadata associated with the CMK. KeyMetadata *KeyMetadata `type:"structure"` } @@ -1358,7 +2124,7 @@ type DecryptInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` } @@ -1466,16 +2232,21 @@ type DescribeKeyInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` // A unique identifier for the customer master key. This value can be a globally // unique identifier, a fully specified ARN to either an alias or a key, or - // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName KeyId *string `min:"1" type:"string" required:"true"` } @@ -1525,10 +2296,13 @@ func (s DescribeKeyOutput) GoString() string { type DisableKeyInput struct { _ struct{} `type:"structure"` - // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // A unique identifier for the CMK. + // + // Use the CMK's unique identifier or its Amazon Resource Name (ARN). 
For example: + // + // Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab KeyId *string `min:"1" type:"string" required:"true"` } @@ -1576,9 +2350,11 @@ type DisableKeyRotationInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } @@ -1626,9 +2402,11 @@ type EnableKeyInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } @@ -1676,9 +2454,11 @@ type EnableKeyRotationInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } @@ -1733,16 +2513,21 @@ type EncryptInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` // A unique identifier for the customer master key. This value can be a globally // unique identifier, a fully specified ARN to either an alias or a key, or - // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // an alias name prefixed by "alias/". 
+ // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName KeyId *string `min:"1" type:"string" required:"true"` // Data to be encrypted. @@ -1816,16 +2601,21 @@ type GenerateDataKeyInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` // A unique identifier for the customer master key. This value can be a globally // unique identifier, a fully specified ARN to either an alias or a key, or - // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName KeyId *string `min:"1" type:"string" required:"true"` // Value that identifies the encryption algorithm and key size to generate a @@ -1912,16 +2702,21 @@ type GenerateDataKeyWithoutPlaintextInput struct { // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` // A unique identifier for the customer master key. This value can be a globally // unique identifier, a fully specified ARN to either an alias or a key, or - // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName KeyId *string `min:"1" type:"string" required:"true"` // Value that identifies the encryption algorithm and key size. Currently this @@ -2044,9 +2839,11 @@ type GetKeyPolicyInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. 
Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` // String that contains the name of the policy. Currently, this must be "default". @@ -2107,9 +2904,11 @@ type GetKeyRotationStatusInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } @@ -2291,8 +3090,8 @@ type KeyMetadata struct { // The state of the customer master key (CMK). // - // For more information about how key state affects the use of a CMK, go to - // How Key State Affects the Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) + // For more information about how key state affects the use of a CMK, see How + // Key State Affects the Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. KeyState *string `type:"string" enum:"KeyState"` @@ -2385,9 +3184,11 @@ type ListGrantsInput struct { _ struct{} `type:"structure"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` // When paginating results, specify the maximum number of items to return in @@ -2467,10 +3268,15 @@ type ListKeyPoliciesInput struct { // A unique identifier for the customer master key. This value can be a globally // unique identifier, a fully specified ARN to either an alias or a key, or - // an alias name prefixed by "alias/". Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // an alias name prefixed by "alias/". 
+ // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName KeyId *string `min:"1" type:"string" required:"true"` // When paginating results, specify the maximum number of items to return in @@ -2638,7 +3444,7 @@ type ListRetirableGrantsInput struct { // To specify the retiring principal, use the Amazon Resource Name (ARN) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of an AWS principal. Valid AWS principals include AWS accounts (root), IAM // users, federated users, and assumed role users. For examples of the ARN syntax - // for specifying a principal, go to AWS Identity and Access Management (IAM) + // for specifying a principal, see AWS Identity and Access Management (IAM) // (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-iam) // in the Example ARNs section of the Amazon Web Services General Reference. RetiringPrincipal *string `min:"1" type:"string" required:"true"` @@ -2679,19 +3485,53 @@ func (s *ListRetirableGrantsInput) Validate() error { type PutKeyPolicyInput struct { _ struct{} `type:"structure"` - // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // A flag to indicate whether to bypass the key policy lockout safety check. + // + // Setting this value to true increases the likelihood that the CMK becomes + // unmanageable. Do not set this value to true indiscriminately. + // + // For more information, refer to the scenario in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. + // + // Use this parameter only when you intend to prevent the principal making + // the request from making a subsequent PutKeyPolicy request on the CMK. + // + // The default value is false. + BypassPolicyLockoutSafetyCheck *bool `type:"boolean"` + + // A unique identifier for the CMK. + // + // Use the CMK's unique identifier or its Amazon Resource Name (ARN). For example: + // + // Unique ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab KeyId *string `min:"1" type:"string" required:"true"` - // The policy to attach to the key. This is required and delegates back to the - // account. The key is the root of trust. The policy size limit is 32 KiB (32768 - // bytes). + // The key policy to attach to the CMK. + // + // The key policy must meet the following criteria: + // + // It must allow the principal making the PutKeyPolicy request to make a + // subsequent PutKeyPolicy request on the CMK. This reduces the likelihood that + // the CMK becomes unmanageable. For more information, refer to the scenario + // in the Default Key Policy (http://docs.aws.amazon.com/kms/latest/developerguide/key-policies.html#key-policy-default-allow-root-enable-iam) + // section in the AWS Key Management Service Developer Guide. 
+ // + // The principal(s) specified in the key policy must exist and be visible + // to AWS KMS. When you create a new AWS principal (for example, an IAM user + // or role), you might need to enforce a delay before specifying the new principal + // in a key policy because the new principal might not immediately be visible + // to AWS KMS. For more information, see Changes that I make are not always + // immediately visible (http://docs.aws.amazon.com/IAM/latest/UserGuide/troubleshoot_general.html#troubleshoot_general_eventual-consistency) + // in the IAM User Guide. + // + // The policy size limit is 32 KiB (32768 bytes). Policy *string `min:"1" type:"string" required:"true"` - // Name of the policy to be attached. Currently, the only supported name is - // "default". + // The name of the key policy. + // + // This value must be default. PolicyName *string `min:"1" type:"string" required:"true"` } @@ -2760,16 +3600,20 @@ type ReEncryptInput struct { // A unique identifier for the customer master key used to re-encrypt the data. // This value can be a globally unique identifier, a fully specified ARN to - // either an alias or a key, or an alias name prefixed by "alias/". Key ARN - // Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 Alias - // Name Example - alias/MyAliasName + // either an alias or a key, or an alias name prefixed by "alias/". + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Alias ARN Example - arn:aws:kms:us-east-1:123456789012:alias/MyAliasName + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // Alias Name Example - alias/MyAliasName DestinationKeyId *string `min:"1" type:"string" required:"true"` // A list of grant tokens. // - // For more information, go to Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) + // For more information, see Grant Tokens (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#grant_token) // in the AWS Key Management Service Developer Guide. GrantTokens []*string `type:"list"` @@ -2840,7 +3684,9 @@ type RetireGrantInput struct { _ struct{} `type:"structure"` // Unique identifier of the grant to be retired. The grant ID is returned by - // the CreateGrant function. Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 + // the CreateGrant function. + // + // Grant ID Example - 0123456789012345678901234567890123456789012345678901234567890123 GrantId *string `min:"1" type:"string"` // Token that identifies the grant to be retired. @@ -2848,8 +3694,11 @@ type RetireGrantInput struct { // A unique identifier for the customer master key associated with the grant. // This value can be a globally unique identifier or a fully specified ARN of - // the key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // the key. 
+ // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string"` } @@ -2904,8 +3753,11 @@ type RevokeGrantInput struct { // A unique identifier for the customer master key associated with the grant. // This value can be a globally unique identifier or the fully specified ARN - // to a key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // to a key. + // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } @@ -2961,12 +3813,14 @@ type ScheduleKeyDeletionInput struct { // The unique identifier for the customer master key (CMK) to delete. // // To specify this value, use the unique key ID or the Amazon Resource Name - // (ARN) of the CMK. Examples: Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab - // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // (ARN) of the CMK. Examples: // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab // - // To obtain the unique key ID and key ARN for a given CMK, use ListKeys or - // DescribeKey. + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + // + // To obtain the unique key ID and key ARN for a given CMK, use ListKeys + // or DescribeKey. KeyId *string `min:"1" type:"string" required:"true"` // The waiting period, specified in number of days. After the waiting period @@ -3037,10 +3891,13 @@ type UpdateAliasInput struct { // Unique identifier of the customer master key to be mapped to the alias. This // value can be a globally unique identifier or the fully specified ARN of a - // key. Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // key. // - // You can call ListAliases to verify that the alias is mapped to the correct + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // + // You can call ListAliases to verify that the alias is mapped to the correct // TargetKeyId. TargetKeyId *string `min:"1" type:"string" required:"true"` } @@ -3098,9 +3955,11 @@ type UpdateKeyDescriptionInput struct { Description *string `type:"string" required:"true"` // A unique identifier for the customer master key. This value can be a globally - // unique identifier or the fully specified ARN to a key. Key ARN Example - - // arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 - // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 + // unique identifier or the fully specified ARN to a key. 
+ // + // Key ARN Example - arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012 + // + // Globally Unique Key ID Example - 12345678-1234-1234-1234-123456789012 KeyId *string `min:"1" type:"string" required:"true"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go index 3dc53b429..4afd3a8e2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // AWS Key Management Service (AWS KMS) is an encryption and key management @@ -33,18 +33,18 @@ import ( // Diffie-Hellman (ECDHE). Most modern systems such as Java 7 and later support // these modes. // -// Signing Requests +// Signing Requests // // Requests must be signed by using an access key ID and a secret access key. -// We strongly recommend that you do not use your AWS account access key ID -// and secret key for everyday work with AWS KMS. Instead, use the access key -// ID and secret access key for an IAM user, or you can use the AWS Security +// We strongly recommend that you do not use your AWS account (root) access +// key ID and secret key for everyday work with AWS KMS. Instead, use the access +// key ID and secret access key for an IAM user, or you can use the AWS Security // Token Service to generate temporary security credentials that you can use // to sign requests. // // All AWS KMS operations require Signature Version 4 (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). // -// Logging API Requests +// Logging API Requests // // AWS KMS supports AWS CloudTrail, a service that logs AWS API calls and related // events for your AWS account and delivers them to an Amazon S3 bucket that @@ -53,23 +53,35 @@ import ( // and so on. To learn more about CloudTrail, including how to turn it on and // find your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/). // -// Additional Resources +// Additional Resources // // For more information about credentials and request signing, see the following: // -// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) +// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html) // - This topic provides general information about the types of credentials -// used for accessing AWS. AWS Security Token Service (http://docs.aws.amazon.com/STS/latest/UsingSTS/) -// - This guide describes how to create and use temporary security credentials. -// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// used for accessing AWS. +// +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html) +// - This section of the IAM User Guide describes how to create and use temporary +// security credentials. +// +// Signature Version 4 Signing Process (http://docs.aws.amazon.com/general/latest/gr/signature-version-4.html) // - This set of topics walks you through the process of signing a request using -// an access key ID and a secret access key. 
Commonly Used APIs +// an access key ID and a secret access key. // -// Of the APIs discussed in this guide, the following will prove the most -// useful for most applications. You will likely perform actions other than -// these, such as creating keys and assigning policies, by using the console. +// Commonly Used APIs // -// Encrypt Decrypt GenerateDataKey GenerateDataKeyWithoutPlaintext +// Of the APIs discussed in this guide, the following will prove the most useful +// for most applications. You will likely perform actions other than these, +// such as creating keys and assigning policies, by using the console. +// +// Encrypt +// +// Decrypt +// +// GenerateDataKey +// +// GenerateDataKeyWithoutPlaintext //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. type KMS struct { @@ -118,7 +130,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go index bc9375293..2467d596b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go @@ -15,7 +15,28 @@ import ( const opAddPermission = "AddPermission" -// AddPermissionRequest generates a request for the AddPermission operation. +// AddPermissionRequest generates a "aws/request.Request" representing the +// client's request for the AddPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddPermissionRequest method. +// req, resp := client.AddPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { op := &request.Operation{ Name: opAddPermission, @@ -56,7 +77,28 @@ func (c *Lambda) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, const opCreateAlias = "CreateAlias" -// CreateAliasRequest generates a request for the CreateAlias operation. +// CreateAliasRequest generates a "aws/request.Request" representing the +// client's request for the CreateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAliasRequest method. +// req, resp := client.CreateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) CreateAliasRequest(input *CreateAliasInput) (req *request.Request, output *AliasConfiguration) { op := &request.Operation{ Name: opCreateAlias, @@ -87,7 +129,28 @@ func (c *Lambda) CreateAlias(input *CreateAliasInput) (*AliasConfiguration, erro const opCreateEventSourceMapping = "CreateEventSourceMapping" -// CreateEventSourceMappingRequest generates a request for the CreateEventSourceMapping operation. +// CreateEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEventSourceMappingRequest method. +// req, resp := client.CreateEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { op := &request.Operation{ Name: opCreateEventSourceMapping, @@ -137,7 +200,28 @@ func (c *Lambda) CreateEventSourceMapping(input *CreateEventSourceMappingInput) const opCreateFunction = "CreateFunction" -// CreateFunctionRequest generates a request for the CreateFunction operation. +// CreateFunctionRequest generates a "aws/request.Request" representing the +// client's request for the CreateFunction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateFunctionRequest method. 
+// req, resp := client.CreateFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) CreateFunctionRequest(input *CreateFunctionInput) (req *request.Request, output *FunctionConfiguration) { op := &request.Operation{ Name: opCreateFunction, @@ -173,7 +257,28 @@ func (c *Lambda) CreateFunction(input *CreateFunctionInput) (*FunctionConfigurat const opDeleteAlias = "DeleteAlias" -// DeleteAliasRequest generates a request for the DeleteAlias operation. +// DeleteAliasRequest generates a "aws/request.Request" representing the +// client's request for the DeleteAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAliasRequest method. +// req, resp := client.DeleteAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) DeleteAliasRequest(input *DeleteAliasInput) (req *request.Request, output *DeleteAliasOutput) { op := &request.Operation{ Name: opDeleteAlias, @@ -205,7 +310,28 @@ func (c *Lambda) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error const opDeleteEventSourceMapping = "DeleteEventSourceMapping" -// DeleteEventSourceMappingRequest generates a request for the DeleteEventSourceMapping operation. +// DeleteEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSourceMappingRequest method. +// req, resp := client.DeleteEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) DeleteEventSourceMappingRequest(input *DeleteEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { op := &request.Operation{ Name: opDeleteEventSourceMapping, @@ -236,7 +362,28 @@ func (c *Lambda) DeleteEventSourceMapping(input *DeleteEventSourceMappingInput) const opDeleteFunction = "DeleteFunction" -// DeleteFunctionRequest generates a request for the DeleteFunction operation. +// DeleteFunctionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteFunction operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteFunctionRequest method. +// req, resp := client.DeleteFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) DeleteFunctionRequest(input *DeleteFunctionInput) (req *request.Request, output *DeleteFunctionOutput) { op := &request.Operation{ Name: opDeleteFunction, @@ -277,7 +424,28 @@ func (c *Lambda) DeleteFunction(input *DeleteFunctionInput) (*DeleteFunctionOutp const opGetAlias = "GetAlias" -// GetAliasRequest generates a request for the GetAlias operation. +// GetAliasRequest generates a "aws/request.Request" representing the +// client's request for the GetAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetAliasRequest method. +// req, resp := client.GetAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) GetAliasRequest(input *GetAliasInput) (req *request.Request, output *AliasConfiguration) { op := &request.Operation{ Name: opGetAlias, @@ -308,7 +476,28 @@ func (c *Lambda) GetAlias(input *GetAliasInput) (*AliasConfiguration, error) { const opGetEventSourceMapping = "GetEventSourceMapping" -// GetEventSourceMappingRequest generates a request for the GetEventSourceMapping operation. +// GetEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the GetEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetEventSourceMappingRequest method. 
+// req, resp := client.GetEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) GetEventSourceMappingRequest(input *GetEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { op := &request.Operation{ Name: opGetEventSourceMapping, @@ -339,7 +528,28 @@ func (c *Lambda) GetEventSourceMapping(input *GetEventSourceMappingInput) (*Even const opGetFunction = "GetFunction" -// GetFunctionRequest generates a request for the GetFunction operation. +// GetFunctionRequest generates a "aws/request.Request" representing the +// client's request for the GetFunction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFunctionRequest method. +// req, resp := client.GetFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) GetFunctionRequest(input *GetFunctionInput) (req *request.Request, output *GetFunctionOutput) { op := &request.Operation{ Name: opGetFunction, @@ -378,7 +588,28 @@ func (c *Lambda) GetFunction(input *GetFunctionInput) (*GetFunctionOutput, error const opGetFunctionConfiguration = "GetFunctionConfiguration" -// GetFunctionConfigurationRequest generates a request for the GetFunctionConfiguration operation. +// GetFunctionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetFunctionConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFunctionConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFunctionConfigurationRequest method. +// req, resp := client.GetFunctionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) GetFunctionConfigurationRequest(input *GetFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) { op := &request.Operation{ Name: opGetFunctionConfiguration, @@ -417,7 +648,28 @@ func (c *Lambda) GetFunctionConfiguration(input *GetFunctionConfigurationInput) const opGetPolicy = "GetPolicy" -// GetPolicyRequest generates a request for the GetPolicy operation. +// GetPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetPolicyRequest method. +// req, resp := client.GetPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) GetPolicyRequest(input *GetPolicyInput) (req *request.Request, output *GetPolicyOutput) { op := &request.Operation{ Name: opGetPolicy, @@ -453,7 +705,28 @@ func (c *Lambda) GetPolicy(input *GetPolicyInput) (*GetPolicyOutput, error) { const opInvoke = "Invoke" -// InvokeRequest generates a request for the Invoke operation. +// InvokeRequest generates a "aws/request.Request" representing the +// client's request for the Invoke operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Invoke method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InvokeRequest method. +// req, resp := client.InvokeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output *InvokeOutput) { op := &request.Operation{ Name: opInvoke, @@ -489,7 +762,28 @@ func (c *Lambda) Invoke(input *InvokeInput) (*InvokeOutput, error) { const opInvokeAsync = "InvokeAsync" -// InvokeAsyncRequest generates a request for the InvokeAsync operation. +// InvokeAsyncRequest generates a "aws/request.Request" representing the +// client's request for the InvokeAsync operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the InvokeAsync method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the InvokeAsyncRequest method. 
+// req, resp := client.InvokeAsyncRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Request, output *InvokeAsyncOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, InvokeAsync, has been deprecated") @@ -524,7 +818,28 @@ func (c *Lambda) InvokeAsync(input *InvokeAsyncInput) (*InvokeAsyncOutput, error const opListAliases = "ListAliases" -// ListAliasesRequest generates a request for the ListAliases operation. +// ListAliasesRequest generates a "aws/request.Request" representing the +// client's request for the ListAliases operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListAliases method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListAliasesRequest method. +// req, resp := client.ListAliasesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) ListAliasesRequest(input *ListAliasesInput) (req *request.Request, output *ListAliasesOutput) { op := &request.Operation{ Name: opListAliases, @@ -556,7 +871,28 @@ func (c *Lambda) ListAliases(input *ListAliasesInput) (*ListAliasesOutput, error const opListEventSourceMappings = "ListEventSourceMappings" -// ListEventSourceMappingsRequest generates a request for the ListEventSourceMappings operation. +// ListEventSourceMappingsRequest generates a "aws/request.Request" representing the +// client's request for the ListEventSourceMappings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEventSourceMappings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListEventSourceMappingsRequest method. +// req, resp := client.ListEventSourceMappingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) ListEventSourceMappingsRequest(input *ListEventSourceMappingsInput) (req *request.Request, output *ListEventSourceMappingsOutput) { op := &request.Operation{ Name: opListEventSourceMappings, @@ -599,6 +935,23 @@ func (c *Lambda) ListEventSourceMappings(input *ListEventSourceMappingsInput) (* return out, err } +// ListEventSourceMappingsPages iterates over the pages of a ListEventSourceMappings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
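The ListEventSourceMappingsPages helper documented here drives the paging loop and hands each page to a callback until the callback returns false or the last page is reached. A minimal usage sketch, assuming a default session, the standard "github.com/aws/aws-sdk-go/aws", ".../aws/session" and ".../service/lambda" imports, and a placeholder function name (none of which come from this diff):

    func listMappingUUIDs() ([]string, error) {
        // Assumed: a default session and Lambda client, for illustration only.
        svc := lambda.New(session.New())
        input := &lambda.ListEventSourceMappingsInput{
            FunctionName: aws.String("my-function"), // placeholder function name
        }
        var uuids []string
        err := svc.ListEventSourceMappingsPages(input,
            func(page *lambda.ListEventSourceMappingsOutput, lastPage bool) bool {
                for _, m := range page.EventSourceMappings {
                    uuids = append(uuids, aws.StringValue(m.UUID))
                }
                return true // keep paging until the last page
            })
        return uuids, err
    }
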
+// +// See ListEventSourceMappings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEventSourceMappings operation. +// pageNum := 0 +// err := client.ListEventSourceMappingsPages(params, +// func(page *ListEventSourceMappingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Lambda) ListEventSourceMappingsPages(input *ListEventSourceMappingsInput, fn func(p *ListEventSourceMappingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListEventSourceMappingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -609,7 +962,28 @@ func (c *Lambda) ListEventSourceMappingsPages(input *ListEventSourceMappingsInpu const opListFunctions = "ListFunctions" -// ListFunctionsRequest generates a request for the ListFunctions operation. +// ListFunctionsRequest generates a "aws/request.Request" representing the +// client's request for the ListFunctions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListFunctions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListFunctionsRequest method. +// req, resp := client.ListFunctionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) ListFunctionsRequest(input *ListFunctionsInput) (req *request.Request, output *ListFunctionsOutput) { op := &request.Operation{ Name: opListFunctions, @@ -648,6 +1022,23 @@ func (c *Lambda) ListFunctions(input *ListFunctionsInput) (*ListFunctionsOutput, return out, err } +// ListFunctionsPages iterates over the pages of a ListFunctions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListFunctions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListFunctions operation. +// pageNum := 0 +// err := client.ListFunctionsPages(params, +// func(page *ListFunctionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Lambda) ListFunctionsPages(input *ListFunctionsInput, fn func(p *ListFunctionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListFunctionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -658,7 +1049,28 @@ func (c *Lambda) ListFunctionsPages(input *ListFunctionsInput, fn func(p *ListFu const opListVersionsByFunction = "ListVersionsByFunction" -// ListVersionsByFunctionRequest generates a request for the ListVersionsByFunction operation. +// ListVersionsByFunctionRequest generates a "aws/request.Request" representing the +// client's request for the ListVersionsByFunction operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVersionsByFunction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVersionsByFunctionRequest method. +// req, resp := client.ListVersionsByFunctionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) ListVersionsByFunctionRequest(input *ListVersionsByFunctionInput) (req *request.Request, output *ListVersionsByFunctionOutput) { op := &request.Operation{ Name: opListVersionsByFunction, @@ -686,7 +1098,28 @@ func (c *Lambda) ListVersionsByFunction(input *ListVersionsByFunctionInput) (*Li const opPublishVersion = "PublishVersion" -// PublishVersionRequest generates a request for the PublishVersion operation. +// PublishVersionRequest generates a "aws/request.Request" representing the +// client's request for the PublishVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PublishVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PublishVersionRequest method. +// req, resp := client.PublishVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) PublishVersionRequest(input *PublishVersionInput) (req *request.Request, output *FunctionConfiguration) { op := &request.Operation{ Name: opPublishVersion, @@ -717,7 +1150,28 @@ func (c *Lambda) PublishVersion(input *PublishVersionInput) (*FunctionConfigurat const opRemovePermission = "RemovePermission" -// RemovePermissionRequest generates a request for the RemovePermission operation. +// RemovePermissionRequest generates a "aws/request.Request" representing the +// client's request for the RemovePermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemovePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemovePermissionRequest method. 
+// req, resp := client.RemovePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { op := &request.Operation{ Name: opRemovePermission, @@ -758,7 +1212,28 @@ func (c *Lambda) RemovePermission(input *RemovePermissionInput) (*RemovePermissi const opUpdateAlias = "UpdateAlias" -// UpdateAliasRequest generates a request for the UpdateAlias operation. +// UpdateAliasRequest generates a "aws/request.Request" representing the +// client's request for the UpdateAlias operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateAlias method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAliasRequest method. +// req, resp := client.UpdateAliasRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) UpdateAliasRequest(input *UpdateAliasInput) (req *request.Request, output *AliasConfiguration) { op := &request.Operation{ Name: opUpdateAlias, @@ -789,7 +1264,28 @@ func (c *Lambda) UpdateAlias(input *UpdateAliasInput) (*AliasConfiguration, erro const opUpdateEventSourceMapping = "UpdateEventSourceMapping" -// UpdateEventSourceMappingRequest generates a request for the UpdateEventSourceMapping operation. +// UpdateEventSourceMappingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateEventSourceMapping operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateEventSourceMapping method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateEventSourceMappingRequest method. +// req, resp := client.UpdateEventSourceMappingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) UpdateEventSourceMappingRequest(input *UpdateEventSourceMappingInput) (req *request.Request, output *EventSourceMappingConfiguration) { op := &request.Operation{ Name: opUpdateEventSourceMapping, @@ -832,7 +1328,28 @@ func (c *Lambda) UpdateEventSourceMapping(input *UpdateEventSourceMappingInput) const opUpdateFunctionCode = "UpdateFunctionCode" -// UpdateFunctionCodeRequest generates a request for the UpdateFunctionCode operation. +// UpdateFunctionCodeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFunctionCode operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFunctionCode method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFunctionCodeRequest method. +// req, resp := client.UpdateFunctionCodeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) UpdateFunctionCodeRequest(input *UpdateFunctionCodeInput) (req *request.Request, output *FunctionConfiguration) { op := &request.Operation{ Name: opUpdateFunctionCode, @@ -867,7 +1384,28 @@ func (c *Lambda) UpdateFunctionCode(input *UpdateFunctionCodeInput) (*FunctionCo const opUpdateFunctionConfiguration = "UpdateFunctionConfiguration" -// UpdateFunctionConfigurationRequest generates a request for the UpdateFunctionConfiguration operation. +// UpdateFunctionConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the UpdateFunctionConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateFunctionConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateFunctionConfigurationRequest method. 
+// req, resp := client.UpdateFunctionConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Lambda) UpdateFunctionConfigurationRequest(input *UpdateFunctionConfigurationInput) (req *request.Request, output *FunctionConfiguration) { op := &request.Operation{ Name: opUpdateFunctionConfiguration, diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go index 0ccde6c02..8b155cf84 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restjson" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Overview @@ -64,7 +64,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restjson.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restjson.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restjson.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go index ec5fcdbe2..9af0dfd05 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -14,7 +14,28 @@ import ( const opAssignInstance = "AssignInstance" -// AssignInstanceRequest generates a request for the AssignInstance operation. +// AssignInstanceRequest generates a "aws/request.Request" representing the +// client's request for the AssignInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignInstanceRequest method. +// req, resp := client.AssignInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *request.Request, output *AssignInstanceOutput) { op := &request.Operation{ Name: opAssignInstance, @@ -36,13 +57,16 @@ func (c *OpsWorks) AssignInstanceRequest(input *AssignInstanceInput) (req *reque // Assign a registered instance to a layer. // -// You can assign registered on-premises instances to any layer type. You -// can assign registered Amazon EC2 instances only to custom layers. You cannot -// use this action with instances that were created with AWS OpsWorks. Required -// Permissions: To use this action, an AWS Identity and Access Management (IAM) -// user must have a Manage permissions level for the stack or an attached policy -// that explicitly grants permissions. 
For more information on user permissions, -// see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). +// You can assign registered on-premises instances to any layer type. +// +// You can assign registered Amazon EC2 instances only to custom layers. +// +// You cannot use this action with instances that were created with AWS OpsWorks. +// +// Required Permissions: To use this action, an AWS Identity and Access +// Management (IAM) user must have a Manage permissions level for the stack +// or an attached policy that explicitly grants permissions. For more information +// on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) AssignInstance(input *AssignInstanceInput) (*AssignInstanceOutput, error) { req, out := c.AssignInstanceRequest(input) err := req.Send() @@ -51,7 +75,28 @@ func (c *OpsWorks) AssignInstance(input *AssignInstanceInput) (*AssignInstanceOu const opAssignVolume = "AssignVolume" -// AssignVolumeRequest generates a request for the AssignVolume operation. +// AssignVolumeRequest generates a "aws/request.Request" representing the +// client's request for the AssignVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssignVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssignVolumeRequest method. +// req, resp := client.AssignVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.Request, output *AssignVolumeOutput) { op := &request.Operation{ Name: opAssignVolume, @@ -77,7 +122,7 @@ func (c *OpsWorks) AssignVolumeRequest(input *AssignVolumeInput) (req *request.R // point before calling AssignVolume. For more information, see Resource Management // (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -89,7 +134,28 @@ func (c *OpsWorks) AssignVolume(input *AssignVolumeInput) (*AssignVolumeOutput, const opAssociateElasticIp = "AssociateElasticIp" -// AssociateElasticIpRequest generates a request for the AssociateElasticIp operation. +// AssociateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the AssociateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateElasticIpRequest method. +// req, resp := client.AssociateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (req *request.Request, output *AssociateElasticIpOutput) { op := &request.Operation{ Name: opAssociateElasticIp, @@ -113,7 +179,7 @@ func (c *OpsWorks) AssociateElasticIpRequest(input *AssociateElasticIpInput) (re // instance. The address must first be registered with the stack by calling // RegisterElasticIp. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -125,7 +191,28 @@ func (c *OpsWorks) AssociateElasticIp(input *AssociateElasticIpInput) (*Associat const opAttachElasticLoadBalancer = "AttachElasticLoadBalancer" -// AttachElasticLoadBalancerRequest generates a request for the AttachElasticLoadBalancer operation. +// AttachElasticLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the AttachElasticLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AttachElasticLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachElasticLoadBalancerRequest method. +// req, resp := client.AttachElasticLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBalancerInput) (req *request.Request, output *AttachElasticLoadBalancerOutput) { op := &request.Operation{ Name: opAttachElasticLoadBalancer, @@ -152,7 +239,7 @@ func (c *OpsWorks) AttachElasticLoadBalancerRequest(input *AttachElasticLoadBala // the Elastic Load Balancing console, API, or CLI. For more information, see // Elastic Load Balancing Developer Guide (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/Welcome.html). 
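The regenerated comments offer two equivalent ways to invoke an operation such as AttachElasticLoadBalancer: call the method directly when only the response matters, or build the request first and execute it with Send. A short sketch of the direct form, assuming a configured *opsworks.OpsWorks client and the "github.com/aws/aws-sdk-go/aws" and ".../service/opsworks" imports; the load balancer name and layer ID are placeholders:

    func attachLayerLoadBalancer(svc *opsworks.OpsWorks) error {
        input := &opsworks.AttachElasticLoadBalancerInput{
            ElasticLoadBalancerName: aws.String("my-elb"),       // placeholder name
            LayerId:                 aws.String("layer-abc123"), // placeholder ID
        }

        // Direct call: the right form when only the service response is needed.
        // The equivalent request form, as in the generated examples, would be
        //   req, out := svc.AttachElasticLoadBalancerRequest(input)
        //   err := req.Send() // out is filled once Send returns nil
        _, err := svc.AttachElasticLoadBalancer(input)
        return err
    }
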
// -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -164,7 +251,28 @@ func (c *OpsWorks) AttachElasticLoadBalancer(input *AttachElasticLoadBalancerInp const opCloneStack = "CloneStack" -// CloneStackRequest generates a request for the CloneStack operation. +// CloneStackRequest generates a "aws/request.Request" representing the +// client's request for the CloneStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CloneStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CloneStackRequest method. +// req, resp := client.CloneStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Request, output *CloneStackOutput) { op := &request.Operation{ Name: opCloneStack, @@ -186,7 +294,7 @@ func (c *OpsWorks) CloneStackRequest(input *CloneStackInput) (req *request.Reque // (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-cloning.html). // By default, all parameters are set to the values used by the parent stack. // -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) CloneStack(input *CloneStackInput) (*CloneStackOutput, error) { @@ -197,7 +305,28 @@ func (c *OpsWorks) CloneStack(input *CloneStackInput) (*CloneStackOutput, error) const opCreateApp = "CreateApp" -// CreateAppRequest generates a request for the CreateApp operation. +// CreateAppRequest generates a "aws/request.Request" representing the +// client's request for the CreateApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateAppRequest method. 
+// req, resp := client.CreateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request, output *CreateAppOutput) { op := &request.Operation{ Name: opCreateApp, @@ -218,7 +347,7 @@ func (c *OpsWorks) CreateAppRequest(input *CreateAppInput) (req *request.Request // Creates an app for a specified stack. For more information, see Creating // Apps (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -230,7 +359,28 @@ func (c *OpsWorks) CreateApp(input *CreateAppInput) (*CreateAppOutput, error) { const opCreateDeployment = "CreateDeployment" -// CreateDeploymentRequest generates a request for the CreateDeployment operation. +// CreateDeploymentRequest generates a "aws/request.Request" representing the +// client's request for the CreateDeployment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDeployment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDeploymentRequest method. +// req, resp := client.CreateDeploymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *request.Request, output *CreateDeploymentOutput) { op := &request.Operation{ Name: opCreateDeployment, @@ -252,7 +402,7 @@ func (c *OpsWorks) CreateDeploymentRequest(input *CreateDeploymentInput) (req *r // (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-deploying.html) // and Run Stack Commands (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-commands.html). // -// Required Permissions: To use this action, an IAM user must have a Deploy +// Required Permissions: To use this action, an IAM user must have a Deploy // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information on user permissions, see Managing // User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -264,7 +414,28 @@ func (c *OpsWorks) CreateDeployment(input *CreateDeploymentInput) (*CreateDeploy const opCreateInstance = "CreateInstance" -// CreateInstanceRequest generates a request for the CreateInstance operation. +// CreateInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateInstanceRequest method. +// req, resp := client.CreateInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *request.Request, output *CreateInstanceOutput) { op := &request.Operation{ Name: opCreateInstance, @@ -285,7 +456,7 @@ func (c *OpsWorks) CreateInstanceRequest(input *CreateInstanceInput) (req *reque // Creates an instance in a specified stack. For more information, see Adding // an Instance to a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-add.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -297,7 +468,28 @@ func (c *OpsWorks) CreateInstance(input *CreateInstanceInput) (*CreateInstanceOu const opCreateLayer = "CreateLayer" -// CreateLayerRequest generates a request for the CreateLayer operation. +// CreateLayerRequest generates a "aws/request.Request" representing the +// client's request for the CreateLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateLayerRequest method. +// req, resp := client.CreateLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Request, output *CreateLayerOutput) { op := &request.Operation{ Name: opCreateLayer, @@ -324,7 +516,7 @@ func (c *OpsWorks) CreateLayerRequest(input *CreateLayerInput) (req *request.Req // of custom layers, so you can call CreateLayer as many times as you like for // that layer type. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
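The comment blocks above describe the main reason to use the generated Request constructors: injecting custom logic into the request lifecycle before calling Send. A sketch of that pattern for CreateLayer, assuming a configured *opsworks.OpsWorks client, the "github.com/aws/aws-sdk-go/aws", ".../aws/request", ".../service/opsworks" and "log" imports, and placeholder stack and layer values:

    func createCustomLayer(svc *opsworks.OpsWorks) (*opsworks.CreateLayerOutput, error) {
        req, out := svc.CreateLayerRequest(&opsworks.CreateLayerInput{
            StackId:   aws.String("stack-abc123"), // placeholder stack ID
            Type:      aws.String("custom"),
            Name:      aws.String("app-layer"),    // placeholder layer name
            Shortname: aws.String("app"),
        })

        // Custom logic injected into the request lifecycle: log the HTTP status
        // of each attempt after the Send handlers have run.
        req.Handlers.Send.PushBack(func(r *request.Request) {
            if r.HTTPResponse != nil {
                log.Printf("CreateLayer attempt returned %s", r.HTTPResponse.Status)
            }
        })

        // Nothing is sent until Send is called; out is populated only if
        // Send returns nil.
        if err := req.Send(); err != nil {
            return nil, err
        }
        return out, nil
    }
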
@@ -336,7 +528,28 @@ func (c *OpsWorks) CreateLayer(input *CreateLayerInput) (*CreateLayerOutput, err const opCreateStack = "CreateStack" -// CreateStackRequest generates a request for the CreateStack operation. +// CreateStackRequest generates a "aws/request.Request" representing the +// client's request for the CreateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStackRequest method. +// req, resp := client.CreateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Request, output *CreateStackOutput) { op := &request.Operation{ Name: opCreateStack, @@ -356,7 +569,7 @@ func (c *OpsWorks) CreateStackRequest(input *CreateStackInput) (req *request.Req // Creates a new stack. For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-edit.html). // -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) CreateStack(input *CreateStackInput) (*CreateStackOutput, error) { @@ -367,7 +580,28 @@ func (c *OpsWorks) CreateStack(input *CreateStackInput) (*CreateStackOutput, err const opCreateUserProfile = "CreateUserProfile" -// CreateUserProfileRequest generates a request for the CreateUserProfile operation. +// CreateUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the CreateUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateUserProfileRequest method. +// req, resp := client.CreateUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req *request.Request, output *CreateUserProfileOutput) { op := &request.Operation{ Name: opCreateUserProfile, @@ -387,7 +621,7 @@ func (c *OpsWorks) CreateUserProfileRequest(input *CreateUserProfileInput) (req // Creates a new user profile. 
// -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) CreateUserProfile(input *CreateUserProfileInput) (*CreateUserProfileOutput, error) { @@ -398,7 +632,28 @@ func (c *OpsWorks) CreateUserProfile(input *CreateUserProfileInput) (*CreateUser const opDeleteApp = "DeleteApp" -// DeleteAppRequest generates a request for the DeleteApp operation. +// DeleteAppRequest generates a "aws/request.Request" representing the +// client's request for the DeleteApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteAppRequest method. +// req, resp := client.DeleteAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request, output *DeleteAppOutput) { op := &request.Operation{ Name: opDeleteApp, @@ -420,7 +675,7 @@ func (c *OpsWorks) DeleteAppRequest(input *DeleteAppInput) (req *request.Request // Deletes a specified app. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -432,7 +687,28 @@ func (c *OpsWorks) DeleteApp(input *DeleteAppInput) (*DeleteAppOutput, error) { const opDeleteInstance = "DeleteInstance" -// DeleteInstanceRequest generates a request for the DeleteInstance operation. +// DeleteInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteInstanceRequest method. 
+// req, resp := client.DeleteInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *request.Request, output *DeleteInstanceOutput) { op := &request.Operation{ Name: opDeleteInstance, @@ -457,7 +733,7 @@ func (c *OpsWorks) DeleteInstanceRequest(input *DeleteInstanceInput) (req *reque // // For more information, see Deleting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-delete.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -469,7 +745,28 @@ func (c *OpsWorks) DeleteInstance(input *DeleteInstanceInput) (*DeleteInstanceOu const opDeleteLayer = "DeleteLayer" -// DeleteLayerRequest generates a request for the DeleteLayer operation. +// DeleteLayerRequest generates a "aws/request.Request" representing the +// client's request for the DeleteLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteLayerRequest method. +// req, resp := client.DeleteLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Request, output *DeleteLayerOutput) { op := &request.Operation{ Name: opDeleteLayer, @@ -493,7 +790,7 @@ func (c *OpsWorks) DeleteLayerRequest(input *DeleteLayerInput) (req *request.Req // instances or unassign registered instances. For more information, see How // to Delete a Layer (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-basics-delete.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -505,7 +802,28 @@ func (c *OpsWorks) DeleteLayer(input *DeleteLayerInput) (*DeleteLayerOutput, err const opDeleteStack = "DeleteStack" -// DeleteStackRequest generates a request for the DeleteStack operation. +// DeleteStackRequest generates a "aws/request.Request" representing the +// client's request for the DeleteStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteStackRequest method. +// req, resp := client.DeleteStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Request, output *DeleteStackOutput) { op := &request.Operation{ Name: opDeleteStack, @@ -529,7 +847,7 @@ func (c *OpsWorks) DeleteStackRequest(input *DeleteStackInput) (req *request.Req // apps or deregister registered instances. For more information, see Shut Down // a Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-shutting.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -541,7 +859,28 @@ func (c *OpsWorks) DeleteStack(input *DeleteStackInput) (*DeleteStackOutput, err const opDeleteUserProfile = "DeleteUserProfile" -// DeleteUserProfileRequest generates a request for the DeleteUserProfile operation. +// DeleteUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the DeleteUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteUserProfileRequest method. +// req, resp := client.DeleteUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req *request.Request, output *DeleteUserProfileOutput) { op := &request.Operation{ Name: opDeleteUserProfile, @@ -563,7 +902,7 @@ func (c *OpsWorks) DeleteUserProfileRequest(input *DeleteUserProfileInput) (req // Deletes a user profile. // -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
func (c *OpsWorks) DeleteUserProfile(input *DeleteUserProfileInput) (*DeleteUserProfileOutput, error) { @@ -574,7 +913,28 @@ func (c *OpsWorks) DeleteUserProfile(input *DeleteUserProfileInput) (*DeleteUser const opDeregisterEcsCluster = "DeregisterEcsCluster" -// DeregisterEcsClusterRequest generates a request for the DeregisterEcsCluster operation. +// DeregisterEcsClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterEcsCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterEcsCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterEcsClusterRequest method. +// req, resp := client.DeregisterEcsClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) (req *request.Request, output *DeregisterEcsClusterOutput) { op := &request.Operation{ Name: opDeregisterEcsCluster, @@ -597,9 +957,10 @@ func (c *OpsWorks) DeregisterEcsClusterRequest(input *DeregisterEcsClusterInput) // Deregisters a specified Amazon ECS cluster from a stack. For more information, // see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html#workinglayers-ecscluster-delete). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants -// permissions. For more information on user permissions, see . +// permissions. For more information on user permissions, see http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html +// (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) DeregisterEcsCluster(input *DeregisterEcsClusterInput) (*DeregisterEcsClusterOutput, error) { req, out := c.DeregisterEcsClusterRequest(input) err := req.Send() @@ -608,7 +969,28 @@ func (c *OpsWorks) DeregisterEcsCluster(input *DeregisterEcsClusterInput) (*Dere const opDeregisterElasticIp = "DeregisterElasticIp" -// DeregisterElasticIpRequest generates a request for the DeregisterElasticIp operation. +// DeregisterElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeregisterElasticIpRequest method. +// req, resp := client.DeregisterElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) (req *request.Request, output *DeregisterElasticIpOutput) { op := &request.Operation{ Name: opDeregisterElasticIp, @@ -631,7 +1013,7 @@ func (c *OpsWorks) DeregisterElasticIpRequest(input *DeregisterElasticIpInput) ( // Deregisters a specified Elastic IP address. The address can then be registered // by another stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -643,7 +1025,28 @@ func (c *OpsWorks) DeregisterElasticIp(input *DeregisterElasticIpInput) (*Deregi const opDeregisterInstance = "DeregisterInstance" -// DeregisterInstanceRequest generates a request for the DeregisterInstance operation. +// DeregisterInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterInstanceRequest method. +// req, resp := client.DeregisterInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (req *request.Request, output *DeregisterInstanceOutput) { op := &request.Operation{ Name: opDeregisterInstance, @@ -667,7 +1070,7 @@ func (c *OpsWorks) DeregisterInstanceRequest(input *DeregisterInstanceInput) (re // the instance from the stack and returns it to your control. This action can // not be used with instances that were created with AWS OpsWorks. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -679,7 +1082,28 @@ func (c *OpsWorks) DeregisterInstance(input *DeregisterInstanceInput) (*Deregist const opDeregisterRdsDbInstance = "DeregisterRdsDbInstance" -// DeregisterRdsDbInstanceRequest generates a request for the DeregisterRdsDbInstance operation. 
+// DeregisterRdsDbInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterRdsDbInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterRdsDbInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterRdsDbInstanceRequest method. +// req, resp := client.DeregisterRdsDbInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstanceInput) (req *request.Request, output *DeregisterRdsDbInstanceOutput) { op := &request.Operation{ Name: opDeregisterRdsDbInstance, @@ -701,7 +1125,7 @@ func (c *OpsWorks) DeregisterRdsDbInstanceRequest(input *DeregisterRdsDbInstance // Deregisters an Amazon RDS instance. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -713,7 +1137,28 @@ func (c *OpsWorks) DeregisterRdsDbInstance(input *DeregisterRdsDbInstanceInput) const opDeregisterVolume = "DeregisterVolume" -// DeregisterVolumeRequest generates a request for the DeregisterVolume operation. +// DeregisterVolumeRequest generates a "aws/request.Request" representing the +// client's request for the DeregisterVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeregisterVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeregisterVolumeRequest method. +// req, resp := client.DeregisterVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *request.Request, output *DeregisterVolumeOutput) { op := &request.Operation{ Name: opDeregisterVolume, @@ -736,7 +1181,7 @@ func (c *OpsWorks) DeregisterVolumeRequest(input *DeregisterVolumeInput) (req *r // Deregisters an Amazon EBS volume. The volume can then be registered by another // stack. For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). 
// -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -748,7 +1193,28 @@ func (c *OpsWorks) DeregisterVolume(input *DeregisterVolumeInput) (*DeregisterVo const opDescribeAgentVersions = "DescribeAgentVersions" -// DescribeAgentVersionsRequest generates a request for the DescribeAgentVersions operation. +// DescribeAgentVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAgentVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAgentVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAgentVersionsRequest method. +// req, resp := client.DescribeAgentVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeAgentVersionsRequest(input *DescribeAgentVersionsInput) (req *request.Request, output *DescribeAgentVersionsOutput) { op := &request.Operation{ Name: opDescribeAgentVersions, @@ -777,7 +1243,28 @@ func (c *OpsWorks) DescribeAgentVersions(input *DescribeAgentVersionsInput) (*De const opDescribeApps = "DescribeApps" -// DescribeAppsRequest generates a request for the DescribeApps operation. +// DescribeAppsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeApps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeApps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAppsRequest method. +// req, resp := client.DescribeAppsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.Request, output *DescribeAppsOutput) { op := &request.Operation{ Name: opDescribeApps, @@ -799,7 +1286,7 @@ func (c *OpsWorks) DescribeAppsRequest(input *DescribeAppsInput) (req *request.R // // You must specify at least one of the parameters. 
// -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -811,7 +1298,28 @@ func (c *OpsWorks) DescribeApps(input *DescribeAppsInput) (*DescribeAppsOutput, const opDescribeCommands = "DescribeCommands" -// DescribeCommandsRequest generates a request for the DescribeCommands operation. +// DescribeCommandsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCommands operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCommands method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCommandsRequest method. +// req, resp := client.DescribeCommandsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *request.Request, output *DescribeCommandsOutput) { op := &request.Operation{ Name: opDescribeCommands, @@ -833,7 +1341,7 @@ func (c *OpsWorks) DescribeCommandsRequest(input *DescribeCommandsInput) (req *r // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -845,7 +1353,28 @@ func (c *OpsWorks) DescribeCommands(input *DescribeCommandsInput) (*DescribeComm const opDescribeDeployments = "DescribeDeployments" -// DescribeDeploymentsRequest generates a request for the DescribeDeployments operation. +// DescribeDeploymentsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDeployments operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDeployments method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDeploymentsRequest method. 
+// req, resp := client.DescribeDeploymentsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) (req *request.Request, output *DescribeDeploymentsOutput) { op := &request.Operation{ Name: opDescribeDeployments, @@ -867,7 +1396,7 @@ func (c *OpsWorks) DescribeDeploymentsRequest(input *DescribeDeploymentsInput) ( // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -879,7 +1408,28 @@ func (c *OpsWorks) DescribeDeployments(input *DescribeDeploymentsInput) (*Descri const opDescribeEcsClusters = "DescribeEcsClusters" -// DescribeEcsClustersRequest generates a request for the DescribeEcsClusters operation. +// DescribeEcsClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEcsClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEcsClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEcsClustersRequest method. +// req, resp := client.DescribeEcsClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) (req *request.Request, output *DescribeEcsClustersOutput) { op := &request.Operation{ Name: opDescribeEcsClusters, @@ -908,7 +1458,7 @@ func (c *OpsWorks) DescribeEcsClustersRequest(input *DescribeEcsClustersInput) ( // the response. However, AWS OpsWorks currently supports only one cluster per // layer, so the result set has a maximum of one element. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack or an attached policy that // explicitly grants permission. For more information on user permissions, see // Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -918,6 +1468,23 @@ func (c *OpsWorks) DescribeEcsClusters(input *DescribeEcsClustersInput) (*Descri return out, err } +// DescribeEcsClustersPages iterates over the pages of a DescribeEcsClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEcsClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
+// +// // Example iterating over at most 3 pages of a DescribeEcsClusters operation. +// pageNum := 0 +// err := client.DescribeEcsClustersPages(params, +// func(page *DescribeEcsClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *OpsWorks) DescribeEcsClustersPages(input *DescribeEcsClustersInput, fn func(p *DescribeEcsClustersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEcsClustersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -928,7 +1495,28 @@ func (c *OpsWorks) DescribeEcsClustersPages(input *DescribeEcsClustersInput, fn const opDescribeElasticIps = "DescribeElasticIps" -// DescribeElasticIpsRequest generates a request for the DescribeElasticIps operation. +// DescribeElasticIpsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticIps operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticIps method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticIpsRequest method. +// req, resp := client.DescribeElasticIpsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (req *request.Request, output *DescribeElasticIpsOutput) { op := &request.Operation{ Name: opDescribeElasticIps, @@ -950,7 +1538,7 @@ func (c *OpsWorks) DescribeElasticIpsRequest(input *DescribeElasticIpsInput) (re // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -962,7 +1550,28 @@ func (c *OpsWorks) DescribeElasticIps(input *DescribeElasticIpsInput) (*Describe const opDescribeElasticLoadBalancers = "DescribeElasticLoadBalancers" -// DescribeElasticLoadBalancersRequest generates a request for the DescribeElasticLoadBalancers operation. +// DescribeElasticLoadBalancersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeElasticLoadBalancers operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeElasticLoadBalancers method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeElasticLoadBalancersRequest method. +// req, resp := client.DescribeElasticLoadBalancersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoadBalancersInput) (req *request.Request, output *DescribeElasticLoadBalancersOutput) { op := &request.Operation{ Name: opDescribeElasticLoadBalancers, @@ -984,7 +1593,7 @@ func (c *OpsWorks) DescribeElasticLoadBalancersRequest(input *DescribeElasticLoa // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -996,7 +1605,28 @@ func (c *OpsWorks) DescribeElasticLoadBalancers(input *DescribeElasticLoadBalanc const opDescribeInstances = "DescribeInstances" -// DescribeInstancesRequest generates a request for the DescribeInstances operation. +// DescribeInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeInstancesRequest method. +// req, resp := client.DescribeInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req *request.Request, output *DescribeInstancesOutput) { op := &request.Operation{ Name: opDescribeInstances, @@ -1018,7 +1648,7 @@ func (c *OpsWorks) DescribeInstancesRequest(input *DescribeInstancesInput) (req // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1030,7 +1660,28 @@ func (c *OpsWorks) DescribeInstances(input *DescribeInstancesInput) (*DescribeIn const opDescribeLayers = "DescribeLayers" -// DescribeLayersRequest generates a request for the DescribeLayers operation. +// DescribeLayersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLayers operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLayers method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLayersRequest method. +// req, resp := client.DescribeLayersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *request.Request, output *DescribeLayersOutput) { op := &request.Operation{ Name: opDescribeLayers, @@ -1052,7 +1703,7 @@ func (c *OpsWorks) DescribeLayersRequest(input *DescribeLayersInput) (req *reque // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1064,7 +1715,28 @@ func (c *OpsWorks) DescribeLayers(input *DescribeLayersInput) (*DescribeLayersOu const opDescribeLoadBasedAutoScaling = "DescribeLoadBasedAutoScaling" -// DescribeLoadBasedAutoScalingRequest generates a request for the DescribeLoadBasedAutoScaling operation. +// DescribeLoadBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBasedAutoScalingRequest method. +// req, resp := client.DescribeLoadBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedAutoScalingInput) (req *request.Request, output *DescribeLoadBasedAutoScalingOutput) { op := &request.Operation{ Name: opDescribeLoadBasedAutoScaling, @@ -1086,7 +1758,7 @@ func (c *OpsWorks) DescribeLoadBasedAutoScalingRequest(input *DescribeLoadBasedA // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. 
For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1098,7 +1770,28 @@ func (c *OpsWorks) DescribeLoadBasedAutoScaling(input *DescribeLoadBasedAutoScal const opDescribeMyUserProfile = "DescribeMyUserProfile" -// DescribeMyUserProfileRequest generates a request for the DescribeMyUserProfile operation. +// DescribeMyUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the DescribeMyUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeMyUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeMyUserProfileRequest method. +// req, resp := client.DescribeMyUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInput) (req *request.Request, output *DescribeMyUserProfileOutput) { op := &request.Operation{ Name: opDescribeMyUserProfile, @@ -1118,7 +1811,7 @@ func (c *OpsWorks) DescribeMyUserProfileRequest(input *DescribeMyUserProfileInpu // Describes a user's SSH information. // -// Required Permissions: To use this action, an IAM user must have self-management +// Required Permissions: To use this action, an IAM user must have self-management // enabled or an attached policy that explicitly grants permissions. For more // information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) DescribeMyUserProfile(input *DescribeMyUserProfileInput) (*DescribeMyUserProfileOutput, error) { @@ -1129,7 +1822,28 @@ func (c *OpsWorks) DescribeMyUserProfile(input *DescribeMyUserProfileInput) (*De const opDescribePermissions = "DescribePermissions" -// DescribePermissionsRequest generates a request for the DescribePermissions operation. +// DescribePermissionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePermissions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePermissions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePermissionsRequest method. 
+// req, resp := client.DescribePermissionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) (req *request.Request, output *DescribePermissionsOutput) { op := &request.Operation{ Name: opDescribePermissions, @@ -1149,7 +1863,7 @@ func (c *OpsWorks) DescribePermissionsRequest(input *DescribePermissionsInput) ( // Describes the permissions for a specified stack. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1161,7 +1875,28 @@ func (c *OpsWorks) DescribePermissions(input *DescribePermissionsInput) (*Descri const opDescribeRaidArrays = "DescribeRaidArrays" -// DescribeRaidArraysRequest generates a request for the DescribeRaidArrays operation. +// DescribeRaidArraysRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRaidArrays operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRaidArrays method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRaidArraysRequest method. +// req, resp := client.DescribeRaidArraysRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (req *request.Request, output *DescribeRaidArraysOutput) { op := &request.Operation{ Name: opDescribeRaidArrays, @@ -1183,7 +1918,7 @@ func (c *OpsWorks) DescribeRaidArraysRequest(input *DescribeRaidArraysInput) (re // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1195,7 +1930,28 @@ func (c *OpsWorks) DescribeRaidArrays(input *DescribeRaidArraysInput) (*Describe const opDescribeRdsDbInstances = "DescribeRdsDbInstances" -// DescribeRdsDbInstancesRequest generates a request for the DescribeRdsDbInstances operation. +// DescribeRdsDbInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeRdsDbInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeRdsDbInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeRdsDbInstancesRequest method. +// req, resp := client.DescribeRdsDbInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesInput) (req *request.Request, output *DescribeRdsDbInstancesOutput) { op := &request.Operation{ Name: opDescribeRdsDbInstances, @@ -1215,7 +1971,7 @@ func (c *OpsWorks) DescribeRdsDbInstancesRequest(input *DescribeRdsDbInstancesIn // Describes Amazon RDS instances. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1227,7 +1983,28 @@ func (c *OpsWorks) DescribeRdsDbInstances(input *DescribeRdsDbInstancesInput) (* const opDescribeServiceErrors = "DescribeServiceErrors" -// DescribeServiceErrorsRequest generates a request for the DescribeServiceErrors operation. +// DescribeServiceErrorsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeServiceErrors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeServiceErrors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeServiceErrorsRequest method. +// req, resp := client.DescribeServiceErrorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInput) (req *request.Request, output *DescribeServiceErrorsOutput) { op := &request.Operation{ Name: opDescribeServiceErrors, @@ -1247,7 +2024,7 @@ func (c *OpsWorks) DescribeServiceErrorsRequest(input *DescribeServiceErrorsInpu // Describes AWS OpsWorks service errors. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
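The generated comments above repeatedly mention injecting custom logic into a request's lifecycle with a custom handler, but their inline examples only show the plain Send call. As an illustrative sketch only (not something added by this patch), assuming an OpsWorks client built elsewhere with opsworks.New(session.New()), a handler can be attached to the returned request before it is sent:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.New())

	// Build the request object without sending it.
	req, resp := svc.DescribeServiceErrorsRequest(&opsworks.DescribeServiceErrorsInput{})

	// Inject custom logic into the request lifecycle via a handler.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("[DEBUG] sending %s request", r.Operation.Name)
	})

	// Nothing is transmitted until Send is called on the request object.
	if err := req.Send(); err == nil {
		fmt.Println(resp) // resp is now filled
	}
}

Every XxxRequest method documented in this file returns the same *request.Request type, so the same handler approach applies to any of the operations above.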
@@ -1259,7 +2036,28 @@ func (c *OpsWorks) DescribeServiceErrors(input *DescribeServiceErrorsInput) (*De const opDescribeStackProvisioningParameters = "DescribeStackProvisioningParameters" -// DescribeStackProvisioningParametersRequest generates a request for the DescribeStackProvisioningParameters operation. +// DescribeStackProvisioningParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackProvisioningParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackProvisioningParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackProvisioningParametersRequest method. +// req, resp := client.DescribeStackProvisioningParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeStackProvisioningParametersInput) (req *request.Request, output *DescribeStackProvisioningParametersOutput) { op := &request.Operation{ Name: opDescribeStackProvisioningParameters, @@ -1279,7 +2077,7 @@ func (c *OpsWorks) DescribeStackProvisioningParametersRequest(input *DescribeSta // Requests a description of a stack's provisioning parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack or an attached policy that // explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1291,7 +2089,28 @@ func (c *OpsWorks) DescribeStackProvisioningParameters(input *DescribeStackProvi const opDescribeStackSummary = "DescribeStackSummary" -// DescribeStackSummaryRequest generates a request for the DescribeStackSummary operation. +// DescribeStackSummaryRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStackSummary operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStackSummary method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStackSummaryRequest method. 
+// req, resp := client.DescribeStackSummaryRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) (req *request.Request, output *DescribeStackSummaryOutput) { op := &request.Operation{ Name: opDescribeStackSummary, @@ -1312,7 +2131,7 @@ func (c *OpsWorks) DescribeStackSummaryRequest(input *DescribeStackSummaryInput) // Describes the number of layers and apps in a specified stack, and the number // of instances in each state, such as running_setup or online. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1324,7 +2143,28 @@ func (c *OpsWorks) DescribeStackSummary(input *DescribeStackSummaryInput) (*Desc const opDescribeStacks = "DescribeStacks" -// DescribeStacksRequest generates a request for the DescribeStacks operation. +// DescribeStacksRequest generates a "aws/request.Request" representing the +// client's request for the DescribeStacks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeStacks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeStacksRequest method. +// req, resp := client.DescribeStacksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *request.Request, output *DescribeStacksOutput) { op := &request.Operation{ Name: opDescribeStacks, @@ -1344,7 +2184,7 @@ func (c *OpsWorks) DescribeStacksRequest(input *DescribeStacksInput) (req *reque // Requests a description of one or more stacks. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1356,7 +2196,28 @@ func (c *OpsWorks) DescribeStacks(input *DescribeStacksInput) (*DescribeStacksOu const opDescribeTimeBasedAutoScaling = "DescribeTimeBasedAutoScaling" -// DescribeTimeBasedAutoScalingRequest generates a request for the DescribeTimeBasedAutoScaling operation. +// DescribeTimeBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTimeBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTimeBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTimeBasedAutoScalingRequest method. +// req, resp := client.DescribeTimeBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedAutoScalingInput) (req *request.Request, output *DescribeTimeBasedAutoScalingOutput) { op := &request.Operation{ Name: opDescribeTimeBasedAutoScaling, @@ -1378,7 +2239,7 @@ func (c *OpsWorks) DescribeTimeBasedAutoScalingRequest(input *DescribeTimeBasedA // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1390,7 +2251,28 @@ func (c *OpsWorks) DescribeTimeBasedAutoScaling(input *DescribeTimeBasedAutoScal const opDescribeUserProfiles = "DescribeUserProfiles" -// DescribeUserProfilesRequest generates a request for the DescribeUserProfiles operation. +// DescribeUserProfilesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeUserProfiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeUserProfiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeUserProfilesRequest method. +// req, resp := client.DescribeUserProfilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) (req *request.Request, output *DescribeUserProfilesOutput) { op := &request.Operation{ Name: opDescribeUserProfiles, @@ -1410,7 +2292,7 @@ func (c *OpsWorks) DescribeUserProfilesRequest(input *DescribeUserProfilesInput) // Describe specified users. // -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
func (c *OpsWorks) DescribeUserProfiles(input *DescribeUserProfilesInput) (*DescribeUserProfilesOutput, error) { @@ -1421,7 +2303,28 @@ func (c *OpsWorks) DescribeUserProfiles(input *DescribeUserProfilesInput) (*Desc const opDescribeVolumes = "DescribeVolumes" -// DescribeVolumesRequest generates a request for the DescribeVolumes operation. +// DescribeVolumesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeVolumes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeVolumes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeVolumesRequest method. +// req, resp := client.DescribeVolumesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *request.Request, output *DescribeVolumesOutput) { op := &request.Operation{ Name: opDescribeVolumes, @@ -1443,7 +2346,7 @@ func (c *OpsWorks) DescribeVolumesRequest(input *DescribeVolumesInput) (req *req // // You must specify at least one of the parameters. // -// Required Permissions: To use this action, an IAM user must have a Show, +// Required Permissions: To use this action, an IAM user must have a Show, // Deploy, or Manage permissions level for the stack, or an attached policy // that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1455,7 +2358,28 @@ func (c *OpsWorks) DescribeVolumes(input *DescribeVolumesInput) (*DescribeVolume const opDetachElasticLoadBalancer = "DetachElasticLoadBalancer" -// DetachElasticLoadBalancerRequest generates a request for the DetachElasticLoadBalancer operation. +// DetachElasticLoadBalancerRequest generates a "aws/request.Request" representing the +// client's request for the DetachElasticLoadBalancer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachElasticLoadBalancer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachElasticLoadBalancerRequest method. 
+// req, resp := client.DetachElasticLoadBalancerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBalancerInput) (req *request.Request, output *DetachElasticLoadBalancerOutput) { op := &request.Operation{ Name: opDetachElasticLoadBalancer, @@ -1477,7 +2401,7 @@ func (c *OpsWorks) DetachElasticLoadBalancerRequest(input *DetachElasticLoadBala // Detaches a specified Elastic Load Balancing instance from its layer. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1489,7 +2413,28 @@ func (c *OpsWorks) DetachElasticLoadBalancer(input *DetachElasticLoadBalancerInp const opDisassociateElasticIp = "DisassociateElasticIp" -// DisassociateElasticIpRequest generates a request for the DisassociateElasticIp operation. +// DisassociateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateElasticIpRequest method. +// req, resp := client.DisassociateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInput) (req *request.Request, output *DisassociateElasticIpOutput) { op := &request.Operation{ Name: opDisassociateElasticIp, @@ -1513,7 +2458,7 @@ func (c *OpsWorks) DisassociateElasticIpRequest(input *DisassociateElasticIpInpu // registered with the stack. For more information, see Resource Management // (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1525,7 +2470,28 @@ func (c *OpsWorks) DisassociateElasticIp(input *DisassociateElasticIpInput) (*Di const opGetHostnameSuggestion = "GetHostnameSuggestion" -// GetHostnameSuggestionRequest generates a request for the GetHostnameSuggestion operation. +// GetHostnameSuggestionRequest generates a "aws/request.Request" representing the +// client's request for the GetHostnameSuggestion operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostnameSuggestion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostnameSuggestionRequest method. +// req, resp := client.GetHostnameSuggestionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInput) (req *request.Request, output *GetHostnameSuggestionOutput) { op := &request.Operation{ Name: opGetHostnameSuggestion, @@ -1546,7 +2512,7 @@ func (c *OpsWorks) GetHostnameSuggestionRequest(input *GetHostnameSuggestionInpu // Gets a generated host name for the specified layer, based on the current // host name theme. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1558,7 +2524,28 @@ func (c *OpsWorks) GetHostnameSuggestion(input *GetHostnameSuggestionInput) (*Ge const opGrantAccess = "GrantAccess" -// GrantAccessRequest generates a request for the GrantAccess operation. +// GrantAccessRequest generates a "aws/request.Request" representing the +// client's request for the GrantAccess operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GrantAccess method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GrantAccessRequest method. +// req, resp := client.GrantAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Request, output *GrantAccessOutput) { op := &request.Operation{ Name: opGrantAccess, @@ -1576,8 +2563,9 @@ func (c *OpsWorks) GrantAccessRequest(input *GrantAccessInput) (req *request.Req return } -// This action can be used only with Windows stacks. Grants RDP access to a -// Windows instance for a specified time period. +// This action can be used only with Windows stacks. +// +// Grants RDP access to a Windows instance for a specified time period. 
func (c *OpsWorks) GrantAccess(input *GrantAccessInput) (*GrantAccessOutput, error) { req, out := c.GrantAccessRequest(input) err := req.Send() @@ -1586,7 +2574,28 @@ func (c *OpsWorks) GrantAccess(input *GrantAccessInput) (*GrantAccessOutput, err const opRebootInstance = "RebootInstance" -// RebootInstanceRequest generates a request for the RebootInstance operation. +// RebootInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RebootInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootInstanceRequest method. +// req, resp := client.RebootInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *request.Request, output *RebootInstanceOutput) { op := &request.Operation{ Name: opRebootInstance, @@ -1609,7 +2618,7 @@ func (c *OpsWorks) RebootInstanceRequest(input *RebootInstanceInput) (req *reque // Reboots a specified instance. For more information, see Starting, Stopping, // and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1621,7 +2630,28 @@ func (c *OpsWorks) RebootInstance(input *RebootInstanceInput) (*RebootInstanceOu const opRegisterEcsCluster = "RegisterEcsCluster" -// RegisterEcsClusterRequest generates a request for the RegisterEcsCluster operation. +// RegisterEcsClusterRequest generates a "aws/request.Request" representing the +// client's request for the RegisterEcsCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterEcsCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterEcsClusterRequest method. 
+// req, resp := client.RegisterEcsClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (req *request.Request, output *RegisterEcsClusterOutput) { op := &request.Operation{ Name: opRegisterEcsCluster, @@ -1643,7 +2673,7 @@ func (c *OpsWorks) RegisterEcsClusterRequest(input *RegisterEcsClusterInput) (re // one cluster with a stack. A cluster can be registered with only one stack. // For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/workinglayers-ecscluster.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1655,7 +2685,28 @@ func (c *OpsWorks) RegisterEcsCluster(input *RegisterEcsClusterInput) (*Register const opRegisterElasticIp = "RegisterElasticIp" -// RegisterElasticIpRequest generates a request for the RegisterElasticIp operation. +// RegisterElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the RegisterElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterElasticIpRequest method. +// req, resp := client.RegisterElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req *request.Request, output *RegisterElasticIpOutput) { op := &request.Operation{ Name: opRegisterElasticIp, @@ -1678,7 +2729,7 @@ func (c *OpsWorks) RegisterElasticIpRequest(input *RegisterElasticIpInput) (req // you must first deregister it by calling DeregisterElasticIp. For more information, // see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1690,7 +2741,28 @@ func (c *OpsWorks) RegisterElasticIp(input *RegisterElasticIpInput) (*RegisterEl const opRegisterInstance = "RegisterInstance" -// RegisterInstanceRequest generates a request for the RegisterInstance operation. +// RegisterInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterInstance operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterInstanceRequest method. +// req, resp := client.RegisterInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *request.Request, output *RegisterInstanceOutput) { op := &request.Operation{ Name: opRegisterInstance, @@ -1711,13 +2783,14 @@ func (c *OpsWorks) RegisterInstanceRequest(input *RegisterInstanceInput) (req *r // Registers instances with a specified stack that were created outside of AWS // OpsWorks. // -// We do not recommend using this action to register instances. The complete +// We do not recommend using this action to register instances. The complete // registration operation has two primary steps, installing the AWS OpsWorks // agent on the instance and registering the instance with the stack. RegisterInstance // handles only the second step. You should instead use the AWS CLI register // command, which performs the entire registration operation. For more information, // see Registering an Instance with an AWS OpsWorks Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/registered-instances-register.html). -// Required Permissions: To use this action, an IAM user must have a Manage +// +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1729,7 +2802,28 @@ func (c *OpsWorks) RegisterInstance(input *RegisterInstanceInput) (*RegisterInst const opRegisterRdsDbInstance = "RegisterRdsDbInstance" -// RegisterRdsDbInstanceRequest generates a request for the RegisterRdsDbInstance operation. +// RegisterRdsDbInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RegisterRdsDbInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterRdsDbInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterRdsDbInstanceRequest method. 
+// req, resp := client.RegisterRdsDbInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInput) (req *request.Request, output *RegisterRdsDbInstanceOutput) { op := &request.Operation{ Name: opRegisterRdsDbInstance, @@ -1751,7 +2845,7 @@ func (c *OpsWorks) RegisterRdsDbInstanceRequest(input *RegisterRdsDbInstanceInpu // Registers an Amazon RDS instance with a stack. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1763,7 +2857,28 @@ func (c *OpsWorks) RegisterRdsDbInstance(input *RegisterRdsDbInstanceInput) (*Re const opRegisterVolume = "RegisterVolume" -// RegisterVolumeRequest generates a request for the RegisterVolume operation. +// RegisterVolumeRequest generates a "aws/request.Request" representing the +// client's request for the RegisterVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RegisterVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RegisterVolumeRequest method. +// req, resp := client.RegisterVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *request.Request, output *RegisterVolumeOutput) { op := &request.Operation{ Name: opRegisterVolume, @@ -1786,7 +2901,7 @@ func (c *OpsWorks) RegisterVolumeRequest(input *RegisterVolumeInput) (req *reque // first deregister it by calling DeregisterVolume. For more information, see // Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1798,7 +2913,28 @@ func (c *OpsWorks) RegisterVolume(input *RegisterVolumeInput) (*RegisterVolumeOu const opSetLoadBasedAutoScaling = "SetLoadBasedAutoScaling" -// SetLoadBasedAutoScalingRequest generates a request for the SetLoadBasedAutoScaling operation. +// SetLoadBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the SetLoadBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetLoadBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetLoadBasedAutoScalingRequest method. +// req, resp := client.SetLoadBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScalingInput) (req *request.Request, output *SetLoadBasedAutoScalingOutput) { op := &request.Operation{ Name: opSetLoadBasedAutoScaling, @@ -1827,7 +2963,7 @@ func (c *OpsWorks) SetLoadBasedAutoScalingRequest(input *SetLoadBasedAutoScaling // from that set, so you must ensure that you have created enough instances // to handle the maximum anticipated load. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1839,7 +2975,28 @@ func (c *OpsWorks) SetLoadBasedAutoScaling(input *SetLoadBasedAutoScalingInput) const opSetPermission = "SetPermission" -// SetPermissionRequest generates a request for the SetPermission operation. +// SetPermissionRequest generates a "aws/request.Request" representing the +// client's request for the SetPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetPermissionRequest method. +// req, resp := client.SetPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request.Request, output *SetPermissionOutput) { op := &request.Operation{ Name: opSetPermission, @@ -1862,7 +3019,7 @@ func (c *OpsWorks) SetPermissionRequest(input *SetPermissionInput) (req *request // Specifies a user's permissions. For more information, see Security and Permissions // (http://docs.aws.amazon.com/opsworks/latest/userguide/workingsecurity.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. 
For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1874,7 +3031,28 @@ func (c *OpsWorks) SetPermission(input *SetPermissionInput) (*SetPermissionOutpu const opSetTimeBasedAutoScaling = "SetTimeBasedAutoScaling" -// SetTimeBasedAutoScalingRequest generates a request for the SetTimeBasedAutoScaling operation. +// SetTimeBasedAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the SetTimeBasedAutoScaling operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTimeBasedAutoScaling method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTimeBasedAutoScalingRequest method. +// req, resp := client.SetTimeBasedAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScalingInput) (req *request.Request, output *SetTimeBasedAutoScalingOutput) { op := &request.Operation{ Name: opSetTimeBasedAutoScaling, @@ -1898,7 +3076,7 @@ func (c *OpsWorks) SetTimeBasedAutoScalingRequest(input *SetTimeBasedAutoScaling // For more information, see Managing Load with Time-based and Load-based Instances // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-autoscaling.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1910,7 +3088,28 @@ func (c *OpsWorks) SetTimeBasedAutoScaling(input *SetTimeBasedAutoScalingInput) const opStartInstance = "StartInstance" -// StartInstanceRequest generates a request for the StartInstance operation. +// StartInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StartInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartInstanceRequest method. 
+// req, resp := client.StartInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request.Request, output *StartInstanceOutput) { op := &request.Operation{ Name: opStartInstance, @@ -1933,7 +3132,7 @@ func (c *OpsWorks) StartInstanceRequest(input *StartInstanceInput) (req *request // Starts a specified instance. For more information, see Starting, Stopping, // and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1945,7 +3144,28 @@ func (c *OpsWorks) StartInstance(input *StartInstanceInput) (*StartInstanceOutpu const opStartStack = "StartStack" -// StartStackRequest generates a request for the StartStack operation. +// StartStackRequest generates a "aws/request.Request" representing the +// client's request for the StartStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StartStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StartStackRequest method. +// req, resp := client.StartStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Request, output *StartStackOutput) { op := &request.Operation{ Name: opStartStack, @@ -1967,7 +3187,7 @@ func (c *OpsWorks) StartStackRequest(input *StartStackInput) (req *request.Reque // Starts a stack's instances. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -1979,7 +3199,28 @@ func (c *OpsWorks) StartStack(input *StartStackInput) (*StartStackOutput, error) const opStopInstance = "StopInstance" -// StopInstanceRequest generates a request for the StopInstance operation. +// StopInstanceRequest generates a "aws/request.Request" representing the +// client's request for the StopInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopInstanceRequest method. +// req, resp := client.StopInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.Request, output *StopInstanceOutput) { op := &request.Operation{ Name: opStopInstance, @@ -2004,7 +3245,7 @@ func (c *OpsWorks) StopInstanceRequest(input *StopInstanceInput) (req *request.R // EBS-backed instance without losing data. For more information, see Starting, // Stopping, and Rebooting Instances (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-starting.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2016,7 +3257,28 @@ func (c *OpsWorks) StopInstance(input *StopInstanceInput) (*StopInstanceOutput, const opStopStack = "StopStack" -// StopStackRequest generates a request for the StopStack operation. +// StopStackRequest generates a "aws/request.Request" representing the +// client's request for the StopStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the StopStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the StopStackRequest method. +// req, resp := client.StopStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request, output *StopStackOutput) { op := &request.Operation{ Name: opStopStack, @@ -2038,7 +3300,7 @@ func (c *OpsWorks) StopStackRequest(input *StopStackInput) (req *request.Request // Stops a specified stack. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
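The StopInstance and StopStack operations documented above stop instances without losing data on EBS-backed volumes. A small sketch of stopping an entire stack follows; the stack ID is a placeholder, and imports and client construction follow the first sketch in this commentary.

// stopStack stops all of a stack's instances in one call.
func stopStack(client *opsworks.OpsWorks, stackID string) error {
	_, err := client.StopStack(&opsworks.StopStackInput{
		StackId: aws.String(stackID), // placeholder stack ID
	})
	return err
}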
@@ -2050,7 +3312,28 @@ func (c *OpsWorks) StopStack(input *StopStackInput) (*StopStackOutput, error) { const opUnassignInstance = "UnassignInstance" -// UnassignInstanceRequest generates a request for the UnassignInstance operation. +// UnassignInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UnassignInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnassignInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnassignInstanceRequest method. +// req, resp := client.UnassignInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *request.Request, output *UnassignInstanceOutput) { op := &request.Operation{ Name: opUnassignInstance, @@ -2075,7 +3358,7 @@ func (c *OpsWorks) UnassignInstanceRequest(input *UnassignInstanceInput) (req *r // as needed. You cannot use this action with instances that were created with // AWS OpsWorks. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2087,7 +3370,28 @@ func (c *OpsWorks) UnassignInstance(input *UnassignInstanceInput) (*UnassignInst const opUnassignVolume = "UnassignVolume" -// UnassignVolumeRequest generates a request for the UnassignVolume operation. +// UnassignVolumeRequest generates a "aws/request.Request" representing the +// client's request for the UnassignVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UnassignVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnassignVolumeRequest method. +// req, resp := client.UnassignVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *request.Request, output *UnassignVolumeOutput) { op := &request.Operation{ Name: opUnassignVolume, @@ -2110,7 +3414,7 @@ func (c *OpsWorks) UnassignVolumeRequest(input *UnassignVolumeInput) (req *reque // Unassigns an assigned Amazon EBS volume. The volume remains registered with // the stack. 
For more information, see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2122,7 +3426,28 @@ func (c *OpsWorks) UnassignVolume(input *UnassignVolumeInput) (*UnassignVolumeOu const opUpdateApp = "UpdateApp" -// UpdateAppRequest generates a request for the UpdateApp operation. +// UpdateAppRequest generates a "aws/request.Request" representing the +// client's request for the UpdateApp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateApp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateAppRequest method. +// req, resp := client.UpdateAppRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request, output *UpdateAppOutput) { op := &request.Operation{ Name: opUpdateApp, @@ -2144,7 +3469,7 @@ func (c *OpsWorks) UpdateAppRequest(input *UpdateAppInput) (req *request.Request // Updates a specified app. // -// Required Permissions: To use this action, an IAM user must have a Deploy +// Required Permissions: To use this action, an IAM user must have a Deploy // or Manage permissions level for the stack, or an attached policy that explicitly // grants permissions. For more information on user permissions, see Managing // User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2156,7 +3481,28 @@ func (c *OpsWorks) UpdateApp(input *UpdateAppInput) (*UpdateAppOutput, error) { const opUpdateElasticIp = "UpdateElasticIp" -// UpdateElasticIpRequest generates a request for the UpdateElasticIp operation. +// UpdateElasticIpRequest generates a "aws/request.Request" representing the +// client's request for the UpdateElasticIp operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateElasticIp method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateElasticIpRequest method. 
+// req, resp := client.UpdateElasticIpRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *request.Request, output *UpdateElasticIpOutput) { op := &request.Operation{ Name: opUpdateElasticIp, @@ -2179,7 +3525,7 @@ func (c *OpsWorks) UpdateElasticIpRequest(input *UpdateElasticIpInput) (req *req // Updates a registered Elastic IP address's name. For more information, see // Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2191,7 +3537,28 @@ func (c *OpsWorks) UpdateElasticIp(input *UpdateElasticIpInput) (*UpdateElasticI const opUpdateInstance = "UpdateInstance" -// UpdateInstanceRequest generates a request for the UpdateInstance operation. +// UpdateInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateInstanceRequest method. +// req, resp := client.UpdateInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *request.Request, output *UpdateInstanceOutput) { op := &request.Operation{ Name: opUpdateInstance, @@ -2213,7 +3580,7 @@ func (c *OpsWorks) UpdateInstanceRequest(input *UpdateInstanceInput) (req *reque // Updates a specified instance. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2225,7 +3592,28 @@ func (c *OpsWorks) UpdateInstance(input *UpdateInstanceInput) (*UpdateInstanceOu const opUpdateLayer = "UpdateLayer" -// UpdateLayerRequest generates a request for the UpdateLayer operation. +// UpdateLayerRequest generates a "aws/request.Request" representing the +// client's request for the UpdateLayer operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateLayer method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateLayerRequest method. +// req, resp := client.UpdateLayerRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Request, output *UpdateLayerOutput) { op := &request.Operation{ Name: opUpdateLayer, @@ -2247,7 +3635,7 @@ func (c *OpsWorks) UpdateLayerRequest(input *UpdateLayerInput) (req *request.Req // Updates a specified layer. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2259,7 +3647,28 @@ func (c *OpsWorks) UpdateLayer(input *UpdateLayerInput) (*UpdateLayerOutput, err const opUpdateMyUserProfile = "UpdateMyUserProfile" -// UpdateMyUserProfileRequest generates a request for the UpdateMyUserProfile operation. +// UpdateMyUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateMyUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateMyUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateMyUserProfileRequest method. +// req, resp := client.UpdateMyUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) (req *request.Request, output *UpdateMyUserProfileOutput) { op := &request.Operation{ Name: opUpdateMyUserProfile, @@ -2281,7 +3690,7 @@ func (c *OpsWorks) UpdateMyUserProfileRequest(input *UpdateMyUserProfileInput) ( // Updates a user's SSH public key. // -// Required Permissions: To use this action, an IAM user must have self-management +// Required Permissions: To use this action, an IAM user must have self-management // enabled or an attached policy that explicitly grants permissions. For more // information on user permissions, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). 
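UpdateMyUserProfile, described above, updates the calling user's own SSH public key and requires self-management to be enabled. A hedged sketch, with the key material as a placeholder and client setup as in the first sketch:

// updateMySSHKey replaces the calling IAM user's SSH public key.
func updateMySSHKey(client *opsworks.OpsWorks, publicKey string) error {
	_, err := client.UpdateMyUserProfile(&opsworks.UpdateMyUserProfileInput{
		SshPublicKey: aws.String(publicKey), // placeholder public key material
	})
	return err
}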
func (c *OpsWorks) UpdateMyUserProfile(input *UpdateMyUserProfileInput) (*UpdateMyUserProfileOutput, error) { @@ -2292,7 +3701,28 @@ func (c *OpsWorks) UpdateMyUserProfile(input *UpdateMyUserProfileInput) (*Update const opUpdateRdsDbInstance = "UpdateRdsDbInstance" -// UpdateRdsDbInstanceRequest generates a request for the UpdateRdsDbInstance operation. +// UpdateRdsDbInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateRdsDbInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateRdsDbInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateRdsDbInstanceRequest method. +// req, resp := client.UpdateRdsDbInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) (req *request.Request, output *UpdateRdsDbInstanceOutput) { op := &request.Operation{ Name: opUpdateRdsDbInstance, @@ -2314,7 +3744,7 @@ func (c *OpsWorks) UpdateRdsDbInstanceRequest(input *UpdateRdsDbInstanceInput) ( // Updates an Amazon RDS instance. // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2326,7 +3756,28 @@ func (c *OpsWorks) UpdateRdsDbInstance(input *UpdateRdsDbInstanceInput) (*Update const opUpdateStack = "UpdateStack" -// UpdateStackRequest generates a request for the UpdateStack operation. +// UpdateStackRequest generates a "aws/request.Request" representing the +// client's request for the UpdateStack operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateStack method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateStackRequest method. +// req, resp := client.UpdateStackRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Request, output *UpdateStackOutput) { op := &request.Operation{ Name: opUpdateStack, @@ -2348,7 +3799,7 @@ func (c *OpsWorks) UpdateStackRequest(input *UpdateStackInput) (req *request.Req // Updates a specified stack. 
// -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2360,7 +3811,28 @@ func (c *OpsWorks) UpdateStack(input *UpdateStackInput) (*UpdateStackOutput, err const opUpdateUserProfile = "UpdateUserProfile" -// UpdateUserProfileRequest generates a request for the UpdateUserProfile operation. +// UpdateUserProfileRequest generates a "aws/request.Request" representing the +// client's request for the UpdateUserProfile operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateUserProfile method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateUserProfileRequest method. +// req, resp := client.UpdateUserProfileRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req *request.Request, output *UpdateUserProfileOutput) { op := &request.Operation{ Name: opUpdateUserProfile, @@ -2382,7 +3854,7 @@ func (c *OpsWorks) UpdateUserProfileRequest(input *UpdateUserProfileInput) (req // Updates a specified user profile. // -// Required Permissions: To use this action, an IAM user must have an attached +// Required Permissions: To use this action, an IAM user must have an attached // policy that explicitly grants permissions. For more information on user permissions, // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). func (c *OpsWorks) UpdateUserProfile(input *UpdateUserProfileInput) (*UpdateUserProfileOutput, error) { @@ -2393,7 +3865,28 @@ func (c *OpsWorks) UpdateUserProfile(input *UpdateUserProfileInput) (*UpdateUser const opUpdateVolume = "UpdateVolume" -// UpdateVolumeRequest generates a request for the UpdateVolume operation. +// UpdateVolumeRequest generates a "aws/request.Request" representing the +// client's request for the UpdateVolume operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateVolume method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateVolumeRequest method. 
+// req, resp := client.UpdateVolumeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.Request, output *UpdateVolumeOutput) { op := &request.Operation{ Name: opUpdateVolume, @@ -2416,7 +3909,7 @@ func (c *OpsWorks) UpdateVolumeRequest(input *UpdateVolumeInput) (req *request.R // Updates an Amazon EBS volume's name or mount point. For more information, // see Resource Management (http://docs.aws.amazon.com/opsworks/latest/userguide/resources.html). // -// Required Permissions: To use this action, an IAM user must have a Manage +// Required Permissions: To use this action, an IAM user must have a Manage // permissions level for the stack, or an attached policy that explicitly grants // permissions. For more information on user permissions, see Managing User // Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). @@ -2482,9 +3975,9 @@ type App struct { // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // // There is no specific limit on the number of environment variables. However, - // the size of the associated data structure - which includes the variables' - // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). - // This limit should accommodate most if not all use cases, but if you do exceed + // the size of the associated data structure - which includes the variable names, + // values, and protected flag values - cannot exceed 10 KB (10240 Bytes). This + // limit should accommodate most if not all use cases, but if you do exceed // it, you will cause an exception (API) with an "Environment: is too large // (maximum is 10KB)" message. Environment []*EnvironmentVariable `type:"list"` @@ -2720,7 +4213,7 @@ type AutoScalingThresholds struct { // takes a list of up to five alarm names, which are case sensitive and must // be in the same region as the stack. // - // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. + // To use custom alarms, you must update your service role to allow cloudwatch:DescribeAlarms. // You can either have AWS OpsWorks update the role for you when you first use // this feature or you can edit the role manually. For more information, see // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). @@ -2840,16 +4333,20 @@ type CloneStackInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically // installs new agent versions on the stack's instances as soon as they are - // available. Fixed version - Set this parameter to your preferred agent version. - // To update the agent version, you must edit the stack configuration and specify - // a new version. AWS OpsWorks then automatically installs that version on the - // stack's instances. The default setting is LATEST. To specify an agent version, - // you must use the complete version number, not the abbreviated number shown - // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // available. 
// - // You can also specify an agent version when you create or update an instance, + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is LATEST. To specify an agent version, you must use + // the complete version number, not the abbreviated number shown on the console. + // For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. AgentVersion *string `type:"string"` @@ -2871,7 +4368,7 @@ type CloneStackInput struct { // The configuration manager. When you clone a stack we recommend that you use // the configuration manager to specify the Chef version: 12, 11.10, or 11.4 // for Linux stacks, or 12.2 for Windows stacks. The default value for Linux - // stacks is currently 11.4. + // stacks is currently 12. ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. @@ -2902,15 +4399,28 @@ type CloneStackInput struct { // The stack's operating system, which must be set to one of the following. // - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify - // the custom AMI you want to use when you create instances. For more information - // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). - // The default option is the parent stack's operating system. For more information + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // Microsoft Windows Server 2012 R2 Base, Microsoft Windows Server 2012 + // R2 with SQL Server Express, Microsoft Windows Server 2012 R2 with SQL Server + // Standard, or Microsoft Windows Server 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information on how to use custom AMIs with + // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the parent stack's operating system. For more information // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // - // You can specify a different Linux operating system for the cloned stack, + // You can specify a different Linux operating system for the cloned stack, // but you cannot change from Linux to Windows or Windows to Linux. DefaultOs *string `type:"string"` @@ -2941,8 +4451,28 @@ type CloneStackInput struct { // HostnameTheme is set to Layer_Dependent, which creates host names by appending // integers to the layer's short name. 
The other themes are: // - // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan - // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // // To obtain a generated host name, call GetHostNameSuggestion, which returns // a host name based on the current theme. HostnameTheme *string `type:"string"` @@ -2980,36 +4510,44 @@ type CloneStackInput struct { // you can instead provide your own custom security groups. UseOpsworksSecurityGroups // has the following settings: // - // True - AWS OpsWorks automatically associates the appropriate built-in security - // group with each layer (default setting). You can associate additional security - // groups with a layer after you create it but you cannot delete the built-in - // security group. False - AWS OpsWorks does not associate built-in security - // groups with layers. You must create appropriate Amazon Elastic Compute Cloud - // (Amazon EC2) security groups and associate a security group with each layer - // that you create. However, you can still manually associate a built-in security + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate Amazon Elastic Compute Cloud (Amazon + // EC2) security groups and associate a security group with each layer that + // you create. However, you can still manually associate a built-in security // group with a layer on creation; custom security groups are required only - // for those layers that need custom settings. For more information, see Create - // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // for those layers that need custom settings. + // + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` // The ID of the VPC that the cloned stack is to be launched into. It must be // in the specified region. All instances are launched into this VPC, and you // cannot change the ID later. // - // If your account supports EC2 Classic, the default value is no VPC. If your - // account does not support EC2 Classic, the default value is the default VPC - // for the specified region. If the VPC ID corresponds to a default VPC and - // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId - // parameter only, AWS OpsWorks infers the value of the other parameter. If - // you specify neither parameter, AWS OpsWorks sets these parameters to the - // first valid Availability Zone for the specified region and the corresponding - // default VPC subnet ID, respectively. + // If your account supports EC2 Classic, the default value is no VPC. + // + // If your account does not support EC2 Classic, the default value is the + // default VPC for the specified region. 
+ // + // If the VPC ID corresponds to a default VPC and you have specified either + // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks + // infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks sets these parameters to the first valid Availability Zone for + // the specified region and the corresponding default VPC subnet ID, respectively. // // If you specify a nondefault VPC ID, note the following: // - // It must belong to a VPC in your account that is in the specified region. - // You must specify a value for DefaultSubnetId. For more information on how - // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // It must belong to a VPC in your account that is in the specified region. + // + // You must specify a value for DefaultSubnetId. + // + // For more information on how to use AWS OpsWorks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information on default VPC and EC2 Classic, see Supported Platforms // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` @@ -3089,13 +4627,36 @@ type Command struct { // The command status: // - // failed successful skipped pending + // failed + // + // successful + // + // skipped + // + // pending Status *string `type:"string"` // The command type: // - // deploy rollback start stop restart undeploy update_dependencies - // install_dependencies update_custom_cookbooks execute_recipes + // deploy + // + // rollback + // + // start + // + // stop + // + // restart + // + // undeploy + // + // update_dependencies + // + // install_dependencies + // + // update_custom_cookbooks + // + // execute_recipes Type *string `type:"string"` } @@ -3136,14 +4697,14 @@ type CreateAppInput struct { // are defined on the associated app server instance. For more information, // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // - // There is no specific limit on the number of environment variables. However, + // There is no specific limit on the number of environment variables. However, // the size of the associated data structure - which includes the variables' // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). // This limit should accommodate most if not all use cases. Exceeding it will // cause an exception with the message, "Environment: is too large (maximum // is 10KB)." // - // This parameter is supported only by Chef 11.10 stacks. If you have specified + // This parameter is supported only by Chef 11.10 stacks. If you have specified // one or more environment variables, you cannot modify the stack's Chef version. Environment []*EnvironmentVariable `type:"list"` @@ -3318,20 +4879,23 @@ type CreateInstanceInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // INHERIT - Use the stack's default agent version setting. version_number - // - Use the specified agent version. This value overrides the stack's default - // setting. To update the agent version, edit the instance configuration and - // specify a new version. AWS OpsWorks then automatically installs that version - // on the instance. The default setting is INHERIT. 
To specify an agent version, - // you must use the complete version number, not the abbreviated number shown - // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // INHERIT - Use the stack's default agent version setting. + // + // version_number - Use the specified agent version. This value overrides + // the stack's default setting. To update the agent version, edit the instance + // configuration and specify a new version. AWS OpsWorks then automatically + // installs that version on the instance. + // + // The default setting is INHERIT. To specify an agent version, you must + // use the complete version number, not the abbreviated number shown on the + // console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion *string `type:"string"` // A custom AMI ID to be used to create the instance. The AMI should be based // on one of the supported operating systems. For more information, see Using // Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // - // If you specify a custom AMI, you must set Os to Custom. + // If you specify a custom AMI, you must set Os to Custom. AmiId *string `type:"string"` // The instance architecture. The default option is x86_64. Instance types do @@ -3382,11 +4946,25 @@ type CreateInstanceInput struct { // The instance's operating system, which must be set to one of the following. // - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more - // information on the supported operating systems, see AWS OpsWorks Operating - // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. + // + // For more information on the supported operating systems, see AWS OpsWorks + // Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. If you set this // parameter to Custom, you must use the CreateInstance action's AmiId parameter @@ -3418,10 +4996,10 @@ type CreateInstanceInput struct { // Because there are costs associated with changes in tenancy options, we recommend // that you research tenancy options before choosing them for your instances. // For more information about dedicated hosts, see Dedicated Hosts Overview - // (https://aws.amazon.com/ec2/dedicated-hosts/) and Amazon EC2 Dedicated Hosts - // (https://aws.amazon.com/ec2/dedicated-hosts/). For more information about + // (http://aws.amazon.com/ec2/dedicated-hosts/) and Amazon EC2 Dedicated Hosts + // (http://aws.amazon.com/ec2/dedicated-hosts/). 
For more information about // dedicated instances, see Dedicated Instances (http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/dedicated-instance.html) - // and Amazon EC2 Dedicated Instances (https://aws.amazon.com/ec2/purchasing-options/dedicated-instances/). + // and Amazon EC2 Dedicated Instances (http://aws.amazon.com/ec2/purchasing-options/dedicated-instances/). Tenancy *string `type:"string"` // The instance's virtualization type, paravirtual or hvm. @@ -3547,7 +5125,8 @@ type CreateLayerInput struct { StackId *string `type:"string" required:"true"` // The layer type. A stack cannot have more than one built-in layer of the same - // type. It can have any number of custom layers. + // type. It can have any number of custom layers. Built-in layers are not available + // in Chef 12 stacks. Type *string `type:"string" required:"true" enum:"LayerType"` // Whether to use Amazon EBS-optimized instances. @@ -3622,17 +5201,21 @@ type CreateStackInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically // installs new agent versions on the stack's instances as soon as they are - // available. Fixed version - Set this parameter to your preferred agent version. - // To update the agent version, you must edit the stack configuration and specify - // a new version. AWS OpsWorks then automatically installs that version on the - // stack's instances. The default setting is the most recent release of the - // agent. To specify an agent version, you must use the complete version number, - // not the abbreviated number shown on the console. For a list of available - // agent version numbers, call DescribeAgentVersions. + // available. // - // You can also specify an agent version when you create or update an instance, + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is the most recent release of the agent. To specify + // an agent version, you must use the complete version number, not the abbreviated + // number shown on the console. For a list of available agent version numbers, + // call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. AgentVersion *string `type:"string"` @@ -3681,12 +5264,25 @@ type CreateStackInput struct { // unless you specify a different operating system when you create the instance. // You can specify one of the following. // - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify - // the custom AMI you want to use when you create instances. For more information, - // see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). - // The default option is the current Amazon Linux version. For more information + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. 
+ // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the current Amazon Linux version. For more information // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` @@ -3718,8 +5314,28 @@ type CreateStackInput struct { // is set to Layer_Dependent, which creates host names by appending integers // to the layer's short name. The other themes are: // - // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan - // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // // To obtain a generated host name, call GetHostNameSuggestion, which returns // a host name based on the current theme. HostnameTheme *string `type:"string"` @@ -3748,14 +5364,17 @@ type CreateStackInput struct { // you can instead provide your own custom security groups. UseOpsworksSecurityGroups // has the following settings: // - // True - AWS OpsWorks automatically associates the appropriate built-in security - // group with each layer (default setting). You can associate additional security - // groups with a layer after you create it, but you cannot delete the built-in - // security group. False - AWS OpsWorks does not associate built-in security - // groups with layers. You must create appropriate EC2 security groups and associate - // a security group with each layer that you create. However, you can still - // manually associate a built-in security group with a layer on creation; custom - // security groups are required only for those layers that need custom settings. + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it, but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate EC2 security groups and associate a security + // group with each layer that you create. However, you can still manually associate + // a built-in security group with a layer on creation; custom security groups + // are required only for those layers that need custom settings. + // // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` @@ -3763,20 +5382,25 @@ type CreateStackInput struct { // in the stack's region. 
All instances are launched into this VPC. You cannot // change the ID later. // - // If your account supports EC2-Classic, the default value is no VPC. If your - // account does not support EC2-Classic, the default value is the default VPC - // for the specified region. If the VPC ID corresponds to a default VPC and - // you have specified either the DefaultAvailabilityZone or the DefaultSubnetId - // parameter only, AWS OpsWorks infers the value of the other parameter. If - // you specify neither parameter, AWS OpsWorks sets these parameters to the - // first valid Availability Zone for the specified region and the corresponding - // default VPC subnet ID, respectively. + // If your account supports EC2-Classic, the default value is no VPC. + // + // If your account does not support EC2-Classic, the default value is the + // default VPC for the specified region. + // + // If the VPC ID corresponds to a default VPC and you have specified either + // the DefaultAvailabilityZone or the DefaultSubnetId parameter only, AWS OpsWorks + // infers the value of the other parameter. If you specify neither parameter, + // AWS OpsWorks sets these parameters to the first valid Availability Zone for + // the specified region and the corresponding default VPC subnet ID, respectively. // // If you specify a nondefault VPC ID, note the following: // - // It must belong to a VPC in your account that is in the specified region. - // You must specify a value for DefaultSubnetId. For more information on how - // to use AWS OpsWorks with a VPC, see Running a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). + // It must belong to a VPC in your account that is in the specified region. + // + // You must specify a value for DefaultSubnetId. + // + // For more information on how to use AWS OpsWorks with a VPC, see Running + // a Stack in a VPC (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-vpc.html). // For more information on default VPC and EC2-Classic, see Supported Platforms // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html). VpcId *string `type:"string"` @@ -4193,7 +5817,11 @@ type Deployment struct { // The deployment status: // - // running successful failed + // running + // + // successful + // + // failed Status *string `type:"string"` } @@ -4219,39 +5847,55 @@ type DeploymentCommand struct { // // The update_dependencies command takes two arguments: // - // upgrade_os_to - Specifies the desired Amazon Linux version for instances + // upgrade_os_to - Specifies the desired Amazon Linux version for instances // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also - // set the allow_reboot argument to true. allow_reboot - Specifies whether to - // allow AWS OpsWorks to reboot the instances if necessary, after installing - // the updates. This argument can be set to either true or false. The default - // value is false. For example, to upgrade an instance to Amazon Linux 2014.09, - // set Args to the following. + // set the allow_reboot argument to true. // - // { "upgrade_os_to":["Amazon Linux 2014.09"], "allow_reboot":["true"] } + // allow_reboot - Specifies whether to allow AWS OpsWorks to reboot the + // instances if necessary, after installing the updates. This argument can be + // set to either true or false. The default value is false. + // + // For example, to upgrade an instance to Amazon Linux 2014.09, set Args + // to the following. 
+ // + // { "upgrade_os_to":["Amazon Linux 2014.09"], "allow_reboot":["true"] } Args map[string][]*string `type:"map"` // Specifies the operation. You can specify only one command. // // For stacks, the following commands are available: // - // execute_recipes: Execute one or more recipes. To specify the recipes, set - // an Args parameter named recipes to the list of recipes to be executed. For - // example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. - // install_dependencies: Install the stack's dependencies. update_custom_cookbooks: - // Update the stack's custom cookbooks. update_dependencies: Update the stack's - // dependencies. The update_dependencies and install_dependencies commands - // are supported only for Linux instances. You can run the commands successfully - // on Windows instances, but they do nothing. For apps, the following commands - // are available: + // execute_recipes: Execute one or more recipes. To specify the recipes, + // set an Args parameter named recipes to the list of recipes to be executed. + // For example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. // - // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter + // install_dependencies: Install the stack's dependencies. + // + // update_custom_cookbooks: Update the stack's custom cookbooks. + // + // update_dependencies: Update the stack's dependencies. + // + // The update_dependencies and install_dependencies commands are supported + // only for Linux instances. You can run the commands successfully on Windows + // instances, but they do nothing. + // + // For apps, the following commands are available: + // + // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter // named migrate. Set Args to {"migrate":["true"]} to migrate the database. - // The default setting is {"migrate":["false"]}. rollback Roll the app back - // to the previous version. When you update an app, AWS OpsWorks stores the - // previous version, up to a maximum of five versions. You can use this command - // to roll an app back as many as four versions. start: Start the app's web - // or application server. stop: Stop the app's web or application server. restart: - // Restart the app's web or application server. undeploy: Undeploy the app. + // The default setting is {"migrate":["false"]}. + // + // rollback Roll the app back to the previous version. When you update an + // app, AWS OpsWorks stores the previous version, up to a maximum of five versions. + // You can use this command to roll an app back as many as four versions. + // + // start: Start the app's web or application server. + // + // stop: Stop the app's web or application server. + // + // restart: Restart the app's web or application server. + // + // undeploy: Undeploy the app. Name *string `type:"string" required:"true" enum:"DeploymentCommandName"` } @@ -5008,12 +6652,15 @@ type DescribePermissionsOutput struct { // An array of Permission objects that describe the stack permissions. // - // If the request object contains only a stack ID, the array contains a Permission - // object with permissions for each of the stack IAM ARNs. If the request object - // contains only an IAM ARN, the array contains a Permission object with permissions - // for each of the user's stack IDs. If the request contains a stack ID and - // an IAM ARN, the array contains a single Permission object with permissions - // for the specified stack and IAM ARN. 
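To make the DeploymentCommand arguments documented above concrete, here is a minimal sketch of running update_dependencies through CreateDeployment with the upgrade_os_to and allow_reboot arguments; the stack ID, instance ID, and region are placeholder values, and the client setup uses the stock aws-sdk-go session helpers:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	// Region is a placeholder; see the endpoint notes in the OpsWorks service documentation below.
	svc := opsworks.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// update_dependencies takes the upgrade_os_to and allow_reboot arguments
	// described above; both values are passed as lists of strings.
	cmd := &opsworks.DeploymentCommand{
		Name: aws.String("update_dependencies"),
		Args: map[string][]*string{
			"upgrade_os_to": {aws.String("Amazon Linux 2014.09")},
			"allow_reboot":  {aws.String("true")},
		},
	}

	out, err := svc.CreateDeployment(&opsworks.CreateDeploymentInput{
		StackId:     aws.String("00000000-0000-0000-0000-000000000000"),                    // placeholder stack ID
		InstanceIds: []*string{aws.String("11111111-1111-1111-1111-111111111111")},          // placeholder instance ID
		Command:     cmd,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("deployment:", aws.StringValue(out.DeploymentId))
}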
+ // If the request object contains only a stack ID, the array contains a Permission + // object with permissions for each of the stack IAM ARNs. + // + // If the request object contains only an IAM ARN, the array contains a Permission + // object with permissions for each of the user's stack IDs. + // + // If the request contains a stack ID and an IAM ARN, the array contains + // a single Permission object with permissions for the specified stack and IAM + // ARN. Permissions []*Permission `type:"list"` } @@ -5948,9 +7595,35 @@ type Instance struct { // The instance status: // - // booting connection_lost online pending rebooting requested - // running_setup setup_failed shutting_down start_failed stopped - // stopping terminated terminating + // booting + // + // connection_lost + // + // online + // + // pending + // + // rebooting + // + // requested + // + // running_setup + // + // setup_failed + // + // shutting_down + // + // start_failed + // + // stop_failed + // + // stopped + // + // stopping + // + // terminated + // + // terminating Status *string `type:"string"` // The instance's subnet ID; applicable only if the stack is running in a VPC. @@ -6236,8 +7909,18 @@ type Permission struct { // The user's permission level, which must be the following: // - // deny show deploy manage iam_only For more information on the - // permissions associated with these levels, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) + // deny + // + // show + // + // deploy + // + // manage + // + // iam_only + // + // For more information on the permissions associated with these levels, + // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html) Level *string `type:"string"` // A stack ID. @@ -6891,8 +8574,18 @@ type SetPermissionInput struct { // The user's permission level, which must be set to one of the following strings. // You cannot set your own permissions level. // - // deny show deploy manage iam_only For more information on the - // permissions associated with these levels, see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). + // deny + // + // show + // + // deploy + // + // manage + // + // iam_only + // + // For more information on the permissions associated with these levels, + // see Managing User Permissions (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-users.html). Level *string `type:"string"` // The stack ID. @@ -7017,9 +8710,13 @@ type Source struct { // When included in a request, the parameter depends on the repository type. // - // For Amazon S3 bundles, set Password to the appropriate IAM secret access - // key. For HTTP bundles and Subversion repositories, set Password to the password. - // For more information on how to safely handle IAM credentials, see . + // For Amazon S3 bundles, set Password to the appropriate IAM secret access + // key. + // + // For HTTP bundles and Subversion repositories, set Password to the password. + // + // For more information on how to safely handle IAM credentials, see http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html + // (http://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html). // // In responses, AWS OpsWorks returns *****FILTERED***** instead of the actual // value. @@ -7045,8 +8742,10 @@ type Source struct { // This parameter depends on the repository type. 
// - // For Amazon S3 bundles, set Username to the appropriate IAM access key ID. - // For HTTP bundles, Git repositories, and Subversion repositories, set Username + // For Amazon S3 bundles, set Username to the appropriate IAM access key + // ID. + // + // For HTTP bundles, Git repositories, and Subversion repositories, set Username // to the user name. Username *string `type:"string"` } @@ -7606,14 +9305,14 @@ type UpdateAppInput struct { // are defined on the associated app server instances.For more information, // see Environment Variables (http://docs.aws.amazon.com/opsworks/latest/userguide/workingapps-creating.html#workingapps-creating-environment). // - // There is no specific limit on the number of environment variables. However, + // There is no specific limit on the number of environment variables. However, // the size of the associated data structure - which includes the variables' // names, values, and protected flag values - cannot exceed 10 KB (10240 Bytes). // This limit should accommodate most if not all use cases. Exceeding it will // cause an exception with the message, "Environment: is too large (maximum // is 10KB)." // - // This parameter is supported only by Chef 11.10 stacks. If you have specified + // This parameter is supported only by Chef 11.10 stacks. If you have specified // one or more environment variables, you cannot modify the stack's Chef version. Environment []*EnvironmentVariable `type:"list"` @@ -7731,21 +9430,23 @@ type UpdateInstanceInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // INHERIT - Use the stack's default agent version setting. version_number - // - Use the specified agent version. This value overrides the stack's default - // setting. To update the agent version, you must edit the instance configuration - // and specify a new version. AWS OpsWorks then automatically installs that - // version on the instance. The default setting is INHERIT. To specify an agent - // version, you must use the complete version number, not the abbreviated number - // shown on the console. For a list of available agent version numbers, call - // DescribeAgentVersions. + // INHERIT - Use the stack's default agent version setting. + // + // version_number - Use the specified agent version. This value overrides + // the stack's default setting. To update the agent version, you must edit the + // instance configuration and specify a new version. AWS OpsWorks then automatically + // installs that version on the instance. + // + // The default setting is INHERIT. To specify an agent version, you must + // use the complete version number, not the abbreviated number shown on the + // console. For a list of available agent version numbers, call DescribeAgentVersions. AgentVersion *string `type:"string"` // A custom AMI ID to be used to create the instance. The AMI must be based // on one of the supported operating systems. For more information, see Instances // (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html) // - // If you specify a custom AMI, you must set Os to Custom. + // If you specify a custom AMI, you must set Os to Custom. AmiId *string `type:"string"` // The instance architecture. Instance types do not necessarily support both @@ -7789,11 +9490,25 @@ type UpdateInstanceInput struct { // The instance's operating system, which must be set to one of the following. 
// - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more - // information on the supported operating systems, see AWS OpsWorks Operating - // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. + // + // For more information on the supported operating systems, see AWS OpsWorks + // Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. If you set this // parameter to Custom, you must use the AmiId parameter to specify the custom @@ -7802,7 +9517,7 @@ type UpdateInstanceInput struct { // For more information on how to use custom AMIs with OpsWorks, see Using Custom // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // - // You can specify a different Linux operating system for the updated stack, + // You can specify a different Linux operating system for the updated stack, // but you cannot change from Linux to Windows or Windows to Linux. Os *string `type:"string"` @@ -8051,16 +9766,20 @@ type UpdateStackInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically + // Auto-update - Set this parameter to LATEST. AWS OpsWorks automatically // installs new agent versions on the stack's instances as soon as they are - // available. Fixed version - Set this parameter to your preferred agent version. - // To update the agent version, you must edit the stack configuration and specify - // a new version. AWS OpsWorks then automatically installs that version on the - // stack's instances. The default setting is LATEST. To specify an agent version, - // you must use the complete version number, not the abbreviated number shown - // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // available. // - // You can also specify an agent version when you create or update an instance, + // Fixed version - Set this parameter to your preferred agent version. To + // update the agent version, you must edit the stack configuration and specify + // a new version. AWS OpsWorks then automatically installs that version on the + // stack's instances. + // + // The default setting is LATEST. To specify an agent version, you must use + // the complete version number, not the abbreviated number shown on the console. + // For a list of available agent version numbers, call DescribeAgentVersions. + // + // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. 
AgentVersion *string `type:"string"` @@ -8107,12 +9826,26 @@ type UpdateStackInput struct { // The stack's operating system, which must be set to one of the following: // - // A supported Linux operating system: An Amazon Linux version, such as Amazon - // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify - // the custom AMI you want to use when you create instances. For more information - // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). - // The default option is the stack's current operating system. For more information + // A supported Linux operating system: An Amazon Linux version, such as Amazon + // Linux 2016.03, Amazon Linux 2015.09, or Amazon Linux 2015.03. + // + // A supported Ubuntu operating system, such as Ubuntu 16.04 LTS, Ubuntu + // 14.04 LTS, or Ubuntu 12.04 LTS. + // + // CentOS 7 + // + // Red Hat Enterprise Linux 7 + // + // A supported Windows operating system, such as Microsoft Windows Server + // 2012 R2 Base, Microsoft Windows Server 2012 R2 with SQL Server Express, Microsoft + // Windows Server 2012 R2 with SQL Server Standard, or Microsoft Windows Server + // 2012 R2 with SQL Server Web. + // + // A custom AMI: Custom. You specify the custom AMI you want to use when + // you create instances. For more information on how to use custom AMIs with + // OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). + // + // The default option is the stack's current operating system. For more information // on the supported operating systems, see AWS OpsWorks Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). DefaultOs *string `type:"string"` @@ -8143,8 +9876,28 @@ type UpdateStackInput struct { // HostnameTheme is set to Layer_Dependent, which creates host names by appending // integers to the layer's short name. The other themes are: // - // Baked_Goods Clouds Europe_Cities Fruits Greek_Deities Legendary_creatures_from_Japan - // Planets_and_Moons Roman_Deities Scottish_Islands US_Cities Wild_Cats + // Baked_Goods + // + // Clouds + // + // Europe_Cities + // + // Fruits + // + // Greek_Deities + // + // Legendary_creatures_from_Japan + // + // Planets_and_Moons + // + // Roman_Deities + // + // Scottish_Islands + // + // US_Cities + // + // Wild_Cats + // // To obtain a generated host name, call GetHostNameSuggestion, which returns // a host name based on the current theme. HostnameTheme *string `type:"string"` @@ -8169,15 +9922,18 @@ type UpdateStackInput struct { // allows you to provide your own custom security groups instead of using the // built-in groups. UseOpsworksSecurityGroups has the following settings: // - // True - AWS OpsWorks automatically associates the appropriate built-in security - // group with each layer (default setting). You can associate additional security - // groups with a layer after you create it, but you cannot delete the built-in - // security group. False - AWS OpsWorks does not associate built-in security - // groups with layers. You must create appropriate EC2 security groups and associate - // a security group with each layer that you create. However, you can still - // manually associate a built-in security group with a layer on. 
Custom security - // groups are required only for those layers that need custom settings. For - // more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). + // True - AWS OpsWorks automatically associates the appropriate built-in + // security group with each layer (default setting). You can associate additional + // security groups with a layer after you create it, but you cannot delete the + // built-in security group. + // + // False - AWS OpsWorks does not associate built-in security groups with + // layers. You must create appropriate EC2 security groups and associate a security + // group with each layer that you create. However, you can still manually associate + // a built-in security group with a layer on. Custom security groups are required + // only for those layers that need custom settings. + // + // For more information, see Create a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). UseOpsworksSecurityGroups *bool `type:"boolean"` } @@ -8434,8 +10190,11 @@ type VolumeConfiguration struct { // The volume type: // - // standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose - // (SSD) + // standard - Magnetic + // + // io1 - Provisioned IOPS (SSD) + // + // gp2 - General Purpose (SSD) VolumeType *string `type:"string"` } @@ -8471,12 +10230,16 @@ func (s *VolumeConfiguration) Validate() error { // Describes a time-based instance's auto scaling schedule. The schedule consists // of a set of key-value pairs. // -// The key is the time period (a UTC hour) and must be an integer from 0 - -// 23. The value indicates whether the instance should be online or offline -// for the specified period, and must be set to "on" or "off" The default setting -// for all time periods is off, so you use the following parameters primarily -// to specify the online periods. You don't have to explicitly specify offline -// periods unless you want to change an online period to an offline period. +// The key is the time period (a UTC hour) and must be an integer from 0 +// - 23. +// +// The value indicates whether the instance should be online or offline for +// the specified period, and must be set to "on" or "off" +// +// The default setting for all time periods is off, so you use the following +// parameters primarily to specify the online periods. You don't have to explicitly +// specify offline periods unless you want to change an online period to an +// offline period. // // The following example specifies that the instance should be online for four // hours, from UTC 1200 - 1600. It will be off for the remainder of the day. diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index 9b8500d12..de8c77962 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -7,12 +7,12 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Welcome to the AWS OpsWorks API Reference. 
This guide provides descriptions, -// syntax, and usage examples about AWS OpsWorks actions and data types, including +// syntax, and usage examples for AWS OpsWorks actions and data types, including // common parameters and error codes. // // AWS OpsWorks is an application management service that provides an integrated @@ -26,27 +26,38 @@ import ( // Line Interface (CLI) or by using one of the AWS SDKs to implement applications // in your preferred language. For more information, see: // -// AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) -// AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) -// AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) -// AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) -// AWS SDK for Ruby (http://docs.aws.amazon.com/AWSRubySDK/latest/AWS/OpsWorks/Client.html) -// AWS SDK for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) -// AWS SDK for Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html) +// AWS CLI (http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) +// +// AWS SDK for Java (http://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/services/opsworks/AWSOpsWorksClient.html) +// +// AWS SDK for .NET (http://docs.aws.amazon.com/sdkfornet/latest/apidocs/html/N_Amazon_OpsWorks.htm) +// +// AWS SDK for PHP 2 (http://docs.aws.amazon.com/aws-sdk-php-2/latest/class-Aws.OpsWorks.OpsWorksClient.html) +// +// AWS SDK for Ruby (http://docs.aws.amazon.com/sdkforruby/api/) +// +// AWS SDK for Node.js (http://aws.amazon.com/documentation/sdkforjavascript/) +// +// AWS SDK for Python(Boto) (http://docs.pythonboto.org/en/latest/ref/opsworks.html) +// // Endpoints // -// AWS OpsWorks supports only one endpoint, opsworks.us-east-1.amazonaws.com -// (HTTPS), so you must connect to that endpoint. You can then use the API to -// direct AWS OpsWorks to create stacks in any AWS Region. +// AWS OpsWorks supports two endpoints, opsworks.us-east-1.amazonaws.com and +// opsworks.ap-south-1.amazonaws.com (both HTTPS). You must connect to one of +// those two endpoints. You can then use the API to direct AWS OpsWorks to create +// stacks in any AWS region. Stacks created in all regions except ap-south-1 +// are connected to the us-east-1 regional endpoint; stacks created in ap-south-1 +// are associated with the ap-south-1 regional endpoint, and can only be accessed +// or managed within that endpoint. // // Chef Versions // // When you call CreateStack, CloneStack, or UpdateStack we recommend you use // the ConfigurationManager parameter to specify the Chef version. The recommended -// value for Linux stacks is currently 12 (the default is 11.4). Windows stacks -// use Chef 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). +// and default value for Linux stacks is currently 12. Windows stacks use Chef +// 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). // -// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend +// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend // migrating your existing Linux stacks to Chef 12 as soon as possible. //The service client's operations are safe to be used concurrently. 
// It is not safe to mutate any of the client's properties though. @@ -96,7 +107,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go index 8d07ac87e..9aa531992 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go @@ -135,6 +135,71 @@ func (c *OpsWorks) WaitUntilInstanceOnline(input *DescribeInstancesInput) error return w.Wait() } +func (c *OpsWorks) WaitUntilInstanceRegistered(input *DescribeInstancesInput) error { + waiterCfg := waiter.Config{ + Operation: "DescribeInstances", + Delay: 15, + MaxAttempts: 40, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "pathAll", + Argument: "Instances[].Status", + Expected: "registered", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "setup_failed", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "shutting_down", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopped", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stopping", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminating", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "terminated", + }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} + func (c *OpsWorks) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { waiterCfg := waiter.Config{ Operation: "DescribeInstances", diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index 12440c395..b62829b19 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -15,7 +15,28 @@ import ( const opAddSourceIdentifierToSubscription = "AddSourceIdentifierToSubscription" -// AddSourceIdentifierToSubscriptionRequest generates a request for the AddSourceIdentifierToSubscription operation. +// AddSourceIdentifierToSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the AddSourceIdentifierToSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddSourceIdentifierToSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
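The WaitUntilInstanceRegistered waiter added above polls DescribeInstances until every requested instance reports the registered status, or returns an error as soon as one of the listed failure states is seen. A minimal usage sketch, with a placeholder instance ID and region:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/opsworks"
)

func main() {
	svc := opsworks.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Block until the instance reaches "registered", or fail fast if it hits
	// one of the waiter's failure states (setup_failed, stopped, terminated,
	// and so on).
	err := svc.WaitUntilInstanceRegistered(&opsworks.DescribeInstancesInput{
		InstanceIds: []*string{aws.String("22222222-2222-2222-2222-222222222222")}, // placeholder
	})
	if err != nil {
		log.Fatalf("instance never registered: %v", err)
	}
}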
+// +// // Example sending a request using the AddSourceIdentifierToSubscriptionRequest method. +// req, resp := client.AddSourceIdentifierToSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) AddSourceIdentifierToSubscriptionRequest(input *AddSourceIdentifierToSubscriptionInput) (req *request.Request, output *AddSourceIdentifierToSubscriptionOutput) { op := &request.Operation{ Name: opAddSourceIdentifierToSubscription, @@ -42,7 +63,28 @@ func (c *RDS) AddSourceIdentifierToSubscription(input *AddSourceIdentifierToSubs const opAddTagsToResource = "AddTagsToResource" -// AddTagsToResourceRequest generates a request for the AddTagsToResource operation. +// AddTagsToResourceRequest generates a "aws/request.Request" representing the +// client's request for the AddTagsToResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddTagsToResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddTagsToResourceRequest method. +// req, resp := client.AddTagsToResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) AddTagsToResourceRequest(input *AddTagsToResourceInput) (req *request.Request, output *AddTagsToResourceOutput) { op := &request.Operation{ Name: opAddTagsToResource, @@ -76,7 +118,28 @@ func (c *RDS) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResour const opApplyPendingMaintenanceAction = "ApplyPendingMaintenanceAction" -// ApplyPendingMaintenanceActionRequest generates a request for the ApplyPendingMaintenanceAction operation. +// ApplyPendingMaintenanceActionRequest generates a "aws/request.Request" representing the +// client's request for the ApplyPendingMaintenanceAction operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ApplyPendingMaintenanceAction method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ApplyPendingMaintenanceActionRequest method. 
+// req, resp := client.ApplyPendingMaintenanceActionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ApplyPendingMaintenanceActionRequest(input *ApplyPendingMaintenanceActionInput) (req *request.Request, output *ApplyPendingMaintenanceActionOutput) { op := &request.Operation{ Name: opApplyPendingMaintenanceAction, @@ -104,7 +167,28 @@ func (c *RDS) ApplyPendingMaintenanceAction(input *ApplyPendingMaintenanceAction const opAuthorizeDBSecurityGroupIngress = "AuthorizeDBSecurityGroupIngress" -// AuthorizeDBSecurityGroupIngressRequest generates a request for the AuthorizeDBSecurityGroupIngress operation. +// AuthorizeDBSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeDBSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeDBSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeDBSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeDBSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) AuthorizeDBSecurityGroupIngressRequest(input *AuthorizeDBSecurityGroupIngressInput) (req *request.Request, output *AuthorizeDBSecurityGroupIngressOutput) { op := &request.Operation{ Name: opAuthorizeDBSecurityGroupIngress, @@ -141,9 +225,78 @@ func (c *RDS) AuthorizeDBSecurityGroupIngress(input *AuthorizeDBSecurityGroupIng return out, err } +const opCopyDBClusterParameterGroup = "CopyDBClusterParameterGroup" + +// CopyDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBClusterParameterGroupRequest method. 
+// req, resp := client.CopyDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) CopyDBClusterParameterGroupRequest(input *CopyDBClusterParameterGroupInput) (req *request.Request, output *CopyDBClusterParameterGroupOutput) { + op := &request.Operation{ + Name: opCopyDBClusterParameterGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CopyDBClusterParameterGroupInput{} + } + + req = c.newRequest(op, input, output) + output = &CopyDBClusterParameterGroupOutput{} + req.Data = output + return +} + +// Copies the specified DB cluster parameter group. +func (c *RDS) CopyDBClusterParameterGroup(input *CopyDBClusterParameterGroupInput) (*CopyDBClusterParameterGroupOutput, error) { + req, out := c.CopyDBClusterParameterGroupRequest(input) + err := req.Send() + return out, err +} + const opCopyDBClusterSnapshot = "CopyDBClusterSnapshot" -// CopyDBClusterSnapshotRequest generates a request for the CopyDBClusterSnapshot operation. +// CopyDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBClusterSnapshotRequest method. +// req, resp := client.CopyDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CopyDBClusterSnapshotRequest(input *CopyDBClusterSnapshotInput) (req *request.Request, output *CopyDBClusterSnapshotOutput) { op := &request.Operation{ Name: opCopyDBClusterSnapshot, @@ -172,7 +325,28 @@ func (c *RDS) CopyDBClusterSnapshot(input *CopyDBClusterSnapshotInput) (*CopyDBC const opCopyDBParameterGroup = "CopyDBParameterGroup" -// CopyDBParameterGroupRequest generates a request for the CopyDBParameterGroup operation. +// CopyDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBParameterGroupRequest method. 
+// req, resp := client.CopyDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CopyDBParameterGroupRequest(input *CopyDBParameterGroupInput) (req *request.Request, output *CopyDBParameterGroupOutput) { op := &request.Operation{ Name: opCopyDBParameterGroup, @@ -199,7 +373,28 @@ func (c *RDS) CopyDBParameterGroup(input *CopyDBParameterGroupInput) (*CopyDBPar const opCopyDBSnapshot = "CopyDBSnapshot" -// CopyDBSnapshotRequest generates a request for the CopyDBSnapshot operation. +// CopyDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyDBSnapshotRequest method. +// req, resp := client.CopyDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CopyDBSnapshotRequest(input *CopyDBSnapshotInput) (req *request.Request, output *CopyDBSnapshotOutput) { op := &request.Operation{ Name: opCopyDBSnapshot, @@ -230,7 +425,28 @@ func (c *RDS) CopyDBSnapshot(input *CopyDBSnapshotInput) (*CopyDBSnapshotOutput, const opCopyOptionGroup = "CopyOptionGroup" -// CopyOptionGroupRequest generates a request for the CopyOptionGroup operation. +// CopyOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the CopyOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyOptionGroupRequest method. +// req, resp := client.CopyOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CopyOptionGroupRequest(input *CopyOptionGroupInput) (req *request.Request, output *CopyOptionGroupOutput) { op := &request.Operation{ Name: opCopyOptionGroup, @@ -257,7 +473,28 @@ func (c *RDS) CopyOptionGroup(input *CopyOptionGroupInput) (*CopyOptionGroupOutp const opCreateDBCluster = "CreateDBCluster" -// CreateDBClusterRequest generates a request for the CreateDBCluster operation. +// CreateDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterRequest method. +// req, resp := client.CreateDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request.Request, output *CreateDBClusterOutput) { op := &request.Operation{ Name: opCreateDBCluster, @@ -275,8 +512,12 @@ func (c *RDS) CreateDBClusterRequest(input *CreateDBClusterInput) (req *request. return } -// Creates a new Amazon Aurora DB cluster. For more information on Amazon Aurora, -// see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// Creates a new Amazon Aurora DB cluster. +// +// You can use the ReplicationSourceIdentifier parameter to create the DB cluster +// as a Read Replica of another DB cluster. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) // in the Amazon RDS User Guide. func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutput, error) { req, out := c.CreateDBClusterRequest(input) @@ -286,7 +527,28 @@ func (c *RDS) CreateDBCluster(input *CreateDBClusterInput) (*CreateDBClusterOutp const opCreateDBClusterParameterGroup = "CreateDBClusterParameterGroup" -// CreateDBClusterParameterGroupRequest generates a request for the CreateDBClusterParameterGroup operation. +// CreateDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterParameterGroupRequest method. +// req, resp := client.CreateDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParameterGroupInput) (req *request.Request, output *CreateDBClusterParameterGroupOutput) { op := &request.Operation{ Name: opCreateDBClusterParameterGroup, @@ -306,7 +568,7 @@ func (c *RDS) CreateDBClusterParameterGroupRequest(input *CreateDBClusterParamet // Creates a new DB cluster parameter group. // -// Parameters in a DB cluster parameter group apply to all of the instances +// Parameters in a DB cluster parameter group apply to all of the instances // in a DB cluster. 
// // A DB cluster parameter group is initially created with the default parameters @@ -340,7 +602,28 @@ func (c *RDS) CreateDBClusterParameterGroup(input *CreateDBClusterParameterGroup const opCreateDBClusterSnapshot = "CreateDBClusterSnapshot" -// CreateDBClusterSnapshotRequest generates a request for the CreateDBClusterSnapshot operation. +// CreateDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBClusterSnapshotRequest method. +// req, resp := client.CreateDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBClusterSnapshotRequest(input *CreateDBClusterSnapshotInput) (req *request.Request, output *CreateDBClusterSnapshotOutput) { op := &request.Operation{ Name: opCreateDBClusterSnapshot, @@ -369,7 +652,28 @@ func (c *RDS) CreateDBClusterSnapshot(input *CreateDBClusterSnapshotInput) (*Cre const opCreateDBInstance = "CreateDBInstance" -// CreateDBInstanceRequest generates a request for the CreateDBInstance operation. +// CreateDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBInstanceRequest method. +// req, resp := client.CreateDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBInstanceRequest(input *CreateDBInstanceInput) (req *request.Request, output *CreateDBInstanceOutput) { op := &request.Operation{ Name: opCreateDBInstance, @@ -396,7 +700,28 @@ func (c *RDS) CreateDBInstance(input *CreateDBInstanceInput) (*CreateDBInstanceO const opCreateDBInstanceReadReplica = "CreateDBInstanceReadReplica" -// CreateDBInstanceReadReplicaRequest generates a request for the CreateDBInstanceReadReplica operation. +// CreateDBInstanceReadReplicaRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBInstanceReadReplica operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBInstanceReadReplica method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBInstanceReadReplicaRequest method. +// req, resp := client.CreateDBInstanceReadReplicaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadReplicaInput) (req *request.Request, output *CreateDBInstanceReadReplicaOutput) { op := &request.Operation{ Name: opCreateDBInstanceReadReplica, @@ -417,12 +742,12 @@ func (c *RDS) CreateDBInstanceReadReplicaRequest(input *CreateDBInstanceReadRepl // Creates a DB instance for a DB instance running MySQL, MariaDB, or PostgreSQL // that acts as a Read Replica of a source DB instance. // -// All Read Replica DB instances are created as Single-AZ deployments with +// All Read Replica DB instances are created as Single-AZ deployments with // backups disabled. All other DB instance attributes (including DB security // groups and DB parameter groups) are inherited from the source DB instance, // except as specified below. // -// The source DB instance must have backup retention enabled. +// The source DB instance must have backup retention enabled. func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInput) (*CreateDBInstanceReadReplicaOutput, error) { req, out := c.CreateDBInstanceReadReplicaRequest(input) err := req.Send() @@ -431,7 +756,28 @@ func (c *RDS) CreateDBInstanceReadReplica(input *CreateDBInstanceReadReplicaInpu const opCreateDBParameterGroup = "CreateDBParameterGroup" -// CreateDBParameterGroupRequest generates a request for the CreateDBParameterGroup operation. +// CreateDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBParameterGroupRequest method. 
+// req, resp := client.CreateDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBParameterGroupRequest(input *CreateDBParameterGroupInput) (req *request.Request, output *CreateDBParameterGroupOutput) { op := &request.Operation{ Name: opCreateDBParameterGroup, @@ -478,7 +824,28 @@ func (c *RDS) CreateDBParameterGroup(input *CreateDBParameterGroupInput) (*Creat const opCreateDBSecurityGroup = "CreateDBSecurityGroup" -// CreateDBSecurityGroupRequest generates a request for the CreateDBSecurityGroup operation. +// CreateDBSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSecurityGroupRequest method. +// req, resp := client.CreateDBSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBSecurityGroupRequest(input *CreateDBSecurityGroupInput) (req *request.Request, output *CreateDBSecurityGroupOutput) { op := &request.Operation{ Name: opCreateDBSecurityGroup, @@ -506,7 +873,28 @@ func (c *RDS) CreateDBSecurityGroup(input *CreateDBSecurityGroupInput) (*CreateD const opCreateDBSnapshot = "CreateDBSnapshot" -// CreateDBSnapshotRequest generates a request for the CreateDBSnapshot operation. +// CreateDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSnapshotRequest method. +// req, resp := client.CreateDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBSnapshotRequest(input *CreateDBSnapshotInput) (req *request.Request, output *CreateDBSnapshotOutput) { op := &request.Operation{ Name: opCreateDBSnapshot, @@ -533,7 +921,28 @@ func (c *RDS) CreateDBSnapshot(input *CreateDBSnapshotInput) (*CreateDBSnapshotO const opCreateDBSubnetGroup = "CreateDBSubnetGroup" -// CreateDBSubnetGroupRequest generates a request for the CreateDBSubnetGroup operation. +// CreateDBSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateDBSubnetGroup operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDBSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDBSubnetGroupRequest method. +// req, resp := client.CreateDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateDBSubnetGroupRequest(input *CreateDBSubnetGroupInput) (req *request.Request, output *CreateDBSubnetGroupOutput) { op := &request.Operation{ Name: opCreateDBSubnetGroup, @@ -561,7 +970,28 @@ func (c *RDS) CreateDBSubnetGroup(input *CreateDBSubnetGroupInput) (*CreateDBSub const opCreateEventSubscription = "CreateEventSubscription" -// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation. +// CreateEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEventSubscriptionRequest method. +// req, resp := client.CreateEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) { op := &request.Operation{ Name: opCreateEventSubscription, @@ -606,7 +1036,28 @@ func (c *RDS) CreateEventSubscription(input *CreateEventSubscriptionInput) (*Cre const opCreateOptionGroup = "CreateOptionGroup" -// CreateOptionGroupRequest generates a request for the CreateOptionGroup operation. +// CreateOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateOptionGroupRequest method. 
+// req, resp := client.CreateOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) CreateOptionGroupRequest(input *CreateOptionGroupInput) (req *request.Request, output *CreateOptionGroupOutput) { op := &request.Operation{ Name: opCreateOptionGroup, @@ -633,7 +1084,28 @@ func (c *RDS) CreateOptionGroup(input *CreateOptionGroupInput) (*CreateOptionGro const opDeleteDBCluster = "DeleteDBCluster" -// DeleteDBClusterRequest generates a request for the DeleteDBCluster operation. +// DeleteDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterRequest method. +// req, resp := client.DeleteDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request.Request, output *DeleteDBClusterOutput) { op := &request.Operation{ Name: opDeleteDBCluster, @@ -651,13 +1123,12 @@ func (c *RDS) DeleteDBClusterRequest(input *DeleteDBClusterInput) (req *request. return } -// The DeleteDBCluster action deletes a previously provisioned DB cluster. A -// successful response from the web service indicates the request was received -// correctly. When you delete a DB cluster, all automated backups for that DB -// cluster are deleted and cannot be recovered. Manual DB cluster snapshots -// of the DB cluster to be deleted are not deleted. +// The DeleteDBCluster action deletes a previously provisioned DB cluster. When +// you delete a DB cluster, all automated backups for that DB cluster are deleted +// and cannot be recovered. Manual DB cluster snapshots of the specified DB +// cluster are not deleted. // -// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) // in the Amazon RDS User Guide. func (c *RDS) DeleteDBCluster(input *DeleteDBClusterInput) (*DeleteDBClusterOutput, error) { req, out := c.DeleteDBClusterRequest(input) @@ -667,7 +1138,28 @@ func (c *RDS) DeleteDBCluster(input *DeleteDBClusterInput) (*DeleteDBClusterOutp const opDeleteDBClusterParameterGroup = "DeleteDBClusterParameterGroup" -// DeleteDBClusterParameterGroupRequest generates a request for the DeleteDBClusterParameterGroup operation. +// DeleteDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterParameterGroupRequest method. +// req, resp := client.DeleteDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBClusterParameterGroupRequest(input *DeleteDBClusterParameterGroupInput) (req *request.Request, output *DeleteDBClusterParameterGroupOutput) { op := &request.Operation{ Name: opDeleteDBClusterParameterGroup, @@ -700,7 +1192,28 @@ func (c *RDS) DeleteDBClusterParameterGroup(input *DeleteDBClusterParameterGroup const opDeleteDBClusterSnapshot = "DeleteDBClusterSnapshot" -// DeleteDBClusterSnapshotRequest generates a request for the DeleteDBClusterSnapshot operation. +// DeleteDBClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBClusterSnapshotRequest method. +// req, resp := client.DeleteDBClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput) (req *request.Request, output *DeleteDBClusterSnapshotOutput) { op := &request.Operation{ Name: opDeleteDBClusterSnapshot, @@ -721,8 +1234,9 @@ func (c *RDS) DeleteDBClusterSnapshotRequest(input *DeleteDBClusterSnapshotInput // Deletes a DB cluster snapshot. If the snapshot is being copied, the copy // operation is terminated. // -// The DB cluster snapshot must be in the available state to be deleted. For -// more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) +// The DB cluster snapshot must be in the available state to be deleted. +// +// For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) // in the Amazon RDS User Guide. func (c *RDS) DeleteDBClusterSnapshot(input *DeleteDBClusterSnapshotInput) (*DeleteDBClusterSnapshotOutput, error) { req, out := c.DeleteDBClusterSnapshotRequest(input) @@ -732,7 +1246,28 @@ func (c *RDS) DeleteDBClusterSnapshot(input *DeleteDBClusterSnapshotInput) (*Del const opDeleteDBInstance = "DeleteDBInstance" -// DeleteDBInstanceRequest generates a request for the DeleteDBInstance operation. 
+// DeleteDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBInstanceRequest method. +// req, resp := client.DeleteDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *request.Request, output *DeleteDBInstanceOutput) { op := &request.Operation{ Name: opDeleteDBInstance, @@ -751,19 +1286,30 @@ func (c *RDS) DeleteDBInstanceRequest(input *DeleteDBInstanceInput) (req *reques } // The DeleteDBInstance action deletes a previously provisioned DB instance. -// A successful response from the web service indicates the request was received -// correctly. When you delete a DB instance, all automated backups for that -// instance are deleted and cannot be recovered. Manual DB snapshots of the -// DB instance to be deleted are not deleted. +// When you delete a DB instance, all automated backups for that instance are +// deleted and cannot be recovered. Manual DB snapshots of the DB instance to +// be deleted by DeleteDBInstance are not deleted. // -// If a final DB snapshot is requested the status of the RDS instance will -// be "deleting" until the DB snapshot is created. The API action DescribeDBInstance +// If you request a final DB snapshot the status of the Amazon RDS DB instance +// is deleting until the DB snapshot is created. The API action DescribeDBInstance // is used to monitor the status of this operation. The action cannot be canceled // or reverted once submitted. // -// Note that when a DB instance is in a failure state and has a status of 'failed', -// 'incompatible-restore', or 'incompatible-network', it can only be deleted -// when the SkipFinalSnapshot parameter is set to "true". +// Note that when a DB instance is in a failure state and has a status of failed, +// incompatible-restore, or incompatible-network, you can only delete it when +// the SkipFinalSnapshot parameter is set to true. +// +// If the specified DB instance is part of an Amazon Aurora DB cluster, you +// cannot delete the DB instance if the following are true: +// +// The DB cluster is a Read Replica of another Amazon Aurora DB cluster. +// +// The DB instance is the only instance in the DB cluster. +// +// To delete a DB instance in this case, first call the PromoteReadReplicaDBCluster +// API action to promote the DB cluster so it's no longer a Read Replica. After +// the promotion completes, then call the DeleteDBInstance API action to delete +// the final instance in the DB cluster. 
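[Editorial sketch, not part of the patch.] The promote-then-delete sequence described in the reworded DeleteDBInstance documentation above (promote the Aurora DB cluster with PromoteReadReplicaDBCluster, then delete the final instance) could be exercised from the SDK roughly as follows; the cluster and instance identifiers are hypothetical and real code would wait for the promotion to finish before deleting:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// Assumes credentials and region are supplied by the environment.
	svc := rds.New(session.New())

	// Step 1: promote the DB cluster so it is no longer a Read Replica.
	_, err := svc.PromoteReadReplicaDBCluster(&rds.PromoteReadReplicaDBClusterInput{
		DBClusterIdentifier: aws.String("example-aurora-cluster"), // hypothetical identifier
	})
	if err != nil {
		log.Fatalf("promote failed: %v", err)
	}

	// Step 2: once the promotion completes, delete the last DB instance
	// in the cluster.
	_, err = svc.DeleteDBInstance(&rds.DeleteDBInstanceInput{
		DBInstanceIdentifier: aws.String("example-aurora-instance"), // hypothetical identifier
		SkipFinalSnapshot:    aws.Bool(true),
	})
	if err != nil {
		log.Fatalf("delete failed: %v", err)
	}
}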
func (c *RDS) DeleteDBInstance(input *DeleteDBInstanceInput) (*DeleteDBInstanceOutput, error) { req, out := c.DeleteDBInstanceRequest(input) err := req.Send() @@ -772,7 +1318,28 @@ func (c *RDS) DeleteDBInstance(input *DeleteDBInstanceInput) (*DeleteDBInstanceO const opDeleteDBParameterGroup = "DeleteDBParameterGroup" -// DeleteDBParameterGroupRequest generates a request for the DeleteDBParameterGroup operation. +// DeleteDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBParameterGroupRequest method. +// req, resp := client.DeleteDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBParameterGroupRequest(input *DeleteDBParameterGroupInput) (req *request.Request, output *DeleteDBParameterGroupOutput) { op := &request.Operation{ Name: opDeleteDBParameterGroup, @@ -802,7 +1369,28 @@ func (c *RDS) DeleteDBParameterGroup(input *DeleteDBParameterGroupInput) (*Delet const opDeleteDBSecurityGroup = "DeleteDBSecurityGroup" -// DeleteDBSecurityGroupRequest generates a request for the DeleteDBSecurityGroup operation. +// DeleteDBSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSecurityGroupRequest method. +// req, resp := client.DeleteDBSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBSecurityGroupRequest(input *DeleteDBSecurityGroupInput) (req *request.Request, output *DeleteDBSecurityGroupOutput) { op := &request.Operation{ Name: opDeleteDBSecurityGroup, @@ -824,7 +1412,7 @@ func (c *RDS) DeleteDBSecurityGroupRequest(input *DeleteDBSecurityGroupInput) (r // Deletes a DB security group. // -// The specified DB security group must not be associated with any DB instances. +// The specified DB security group must not be associated with any DB instances. 
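[Editorial sketch, not part of the patch.] The expanded *Request doc comments in this hunk all describe the same pattern: build a request.Request, optionally attach custom handlers or inspect the request, then call Send. A minimal illustration of that lifecycle, using DeleteDBParameterGroup with a hypothetical parameter group name and an illustrative custom header, might look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	svc := rds.New(session.New())

	// Build the request object without sending it.
	req, resp := svc.DeleteDBParameterGroupRequest(&rds.DeleteDBParameterGroupInput{
		DBParameterGroupName: aws.String("example-param-group"), // hypothetical name
	})

	// Inject custom logic into the request lifecycle: a Build handler that
	// sets a header before the request is signed and sent.
	req.Handlers.Build.PushBack(func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo") // illustrative only
	})

	// Nothing is executed until Send is called on the returned request object.
	if err := req.Send(); err != nil {
		log.Fatalf("DeleteDBParameterGroup failed: %v", err)
	}
	fmt.Println(resp) // resp is now filled
}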
func (c *RDS) DeleteDBSecurityGroup(input *DeleteDBSecurityGroupInput) (*DeleteDBSecurityGroupOutput, error) { req, out := c.DeleteDBSecurityGroupRequest(input) err := req.Send() @@ -833,7 +1421,28 @@ func (c *RDS) DeleteDBSecurityGroup(input *DeleteDBSecurityGroupInput) (*DeleteD const opDeleteDBSnapshot = "DeleteDBSnapshot" -// DeleteDBSnapshotRequest generates a request for the DeleteDBSnapshot operation. +// DeleteDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSnapshotRequest method. +// req, resp := client.DeleteDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBSnapshotRequest(input *DeleteDBSnapshotInput) (req *request.Request, output *DeleteDBSnapshotOutput) { op := &request.Operation{ Name: opDeleteDBSnapshot, @@ -854,7 +1463,7 @@ func (c *RDS) DeleteDBSnapshotRequest(input *DeleteDBSnapshotInput) (req *reques // Deletes a DBSnapshot. If the snapshot is being copied, the copy operation // is terminated. // -// The DBSnapshot must be in the available state to be deleted. +// The DBSnapshot must be in the available state to be deleted. func (c *RDS) DeleteDBSnapshot(input *DeleteDBSnapshotInput) (*DeleteDBSnapshotOutput, error) { req, out := c.DeleteDBSnapshotRequest(input) err := req.Send() @@ -863,7 +1472,28 @@ func (c *RDS) DeleteDBSnapshot(input *DeleteDBSnapshotInput) (*DeleteDBSnapshotO const opDeleteDBSubnetGroup = "DeleteDBSubnetGroup" -// DeleteDBSubnetGroupRequest generates a request for the DeleteDBSubnetGroup operation. +// DeleteDBSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteDBSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteDBSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteDBSubnetGroupRequest method. 
+// req, resp := client.DeleteDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteDBSubnetGroupRequest(input *DeleteDBSubnetGroupInput) (req *request.Request, output *DeleteDBSubnetGroupOutput) { op := &request.Operation{ Name: opDeleteDBSubnetGroup, @@ -885,7 +1515,8 @@ func (c *RDS) DeleteDBSubnetGroupRequest(input *DeleteDBSubnetGroupInput) (req * // Deletes a DB subnet group. // -// The specified database subnet group must not be associated with any DB instances. +// The specified database subnet group must not be associated with any DB +// instances. func (c *RDS) DeleteDBSubnetGroup(input *DeleteDBSubnetGroupInput) (*DeleteDBSubnetGroupOutput, error) { req, out := c.DeleteDBSubnetGroupRequest(input) err := req.Send() @@ -894,7 +1525,28 @@ func (c *RDS) DeleteDBSubnetGroup(input *DeleteDBSubnetGroupInput) (*DeleteDBSub const opDeleteEventSubscription = "DeleteEventSubscription" -// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation. +// DeleteEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSubscriptionRequest method. +// req, resp := client.DeleteEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { op := &request.Operation{ Name: opDeleteEventSubscription, @@ -921,7 +1573,28 @@ func (c *RDS) DeleteEventSubscription(input *DeleteEventSubscriptionInput) (*Del const opDeleteOptionGroup = "DeleteOptionGroup" -// DeleteOptionGroupRequest generates a request for the DeleteOptionGroup operation. +// DeleteOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteOptionGroupRequest method. 
+// req, resp := client.DeleteOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DeleteOptionGroupRequest(input *DeleteOptionGroupInput) (req *request.Request, output *DeleteOptionGroupOutput) { op := &request.Operation{ Name: opDeleteOptionGroup, @@ -950,7 +1623,28 @@ func (c *RDS) DeleteOptionGroup(input *DeleteOptionGroupInput) (*DeleteOptionGro const opDescribeAccountAttributes = "DescribeAccountAttributes" -// DescribeAccountAttributesRequest generates a request for the DescribeAccountAttributes operation. +// DescribeAccountAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeAccountAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeAccountAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeAccountAttributesRequest method. +// req, resp := client.DescribeAccountAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeAccountAttributesRequest(input *DescribeAccountAttributesInput) (req *request.Request, output *DescribeAccountAttributesOutput) { op := &request.Operation{ Name: opDescribeAccountAttributes, @@ -982,7 +1676,28 @@ func (c *RDS) DescribeAccountAttributes(input *DescribeAccountAttributesInput) ( const opDescribeCertificates = "DescribeCertificates" -// DescribeCertificatesRequest generates a request for the DescribeCertificates operation. +// DescribeCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeCertificatesRequest method. +// req, resp := client.DescribeCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeCertificatesRequest(input *DescribeCertificatesInput) (req *request.Request, output *DescribeCertificatesOutput) { op := &request.Operation{ Name: opDescribeCertificates, @@ -1009,7 +1724,28 @@ func (c *RDS) DescribeCertificates(input *DescribeCertificatesInput) (*DescribeC const opDescribeDBClusterParameterGroups = "DescribeDBClusterParameterGroups" -// DescribeDBClusterParameterGroupsRequest generates a request for the DescribeDBClusterParameterGroups operation. 
+// DescribeDBClusterParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterParameterGroupsRequest method. +// req, resp := client.DescribeDBClusterParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBClusterParameterGroupsRequest(input *DescribeDBClusterParameterGroupsInput) (req *request.Request, output *DescribeDBClusterParameterGroupsOutput) { op := &request.Operation{ Name: opDescribeDBClusterParameterGroups, @@ -1041,7 +1777,28 @@ func (c *RDS) DescribeDBClusterParameterGroups(input *DescribeDBClusterParameter const opDescribeDBClusterParameters = "DescribeDBClusterParameters" -// DescribeDBClusterParametersRequest generates a request for the DescribeDBClusterParameters operation. +// DescribeDBClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterParametersRequest method. +// req, resp := client.DescribeDBClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBClusterParametersRequest(input *DescribeDBClusterParametersInput) (req *request.Request, output *DescribeDBClusterParametersOutput) { op := &request.Operation{ Name: opDescribeDBClusterParameters, @@ -1070,9 +1827,89 @@ func (c *RDS) DescribeDBClusterParameters(input *DescribeDBClusterParametersInpu return out, err } +const opDescribeDBClusterSnapshotAttributes = "DescribeDBClusterSnapshotAttributes" + +// DescribeDBClusterSnapshotAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterSnapshotAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the DescribeDBClusterSnapshotAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterSnapshotAttributesRequest method. +// req, resp := client.DescribeDBClusterSnapshotAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) DescribeDBClusterSnapshotAttributesRequest(input *DescribeDBClusterSnapshotAttributesInput) (req *request.Request, output *DescribeDBClusterSnapshotAttributesOutput) { + op := &request.Operation{ + Name: opDescribeDBClusterSnapshotAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeDBClusterSnapshotAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeDBClusterSnapshotAttributesOutput{} + req.Data = output + return +} + +// Returns a list of DB cluster snapshot attribute names and values for a manual +// DB cluster snapshot. +// +// When sharing snapshots with other AWS accounts, DescribeDBClusterSnapshotAttributes +// returns the restore attribute and a list of IDs for the AWS accounts that +// are authorized to copy or restore the manual DB cluster snapshot. If all +// is included in the list of values for the restore attribute, then the manual +// DB cluster snapshot is public and can be copied or restored by all AWS accounts. +// +// To add or remove access for an AWS account to copy or restore a manual DB +// cluster snapshot, or to make the manual DB cluster snapshot public or private, +// use the ModifyDBClusterSnapshotAttribute API action. +func (c *RDS) DescribeDBClusterSnapshotAttributes(input *DescribeDBClusterSnapshotAttributesInput) (*DescribeDBClusterSnapshotAttributesOutput, error) { + req, out := c.DescribeDBClusterSnapshotAttributesRequest(input) + err := req.Send() + return out, err +} + const opDescribeDBClusterSnapshots = "DescribeDBClusterSnapshots" -// DescribeDBClusterSnapshotsRequest generates a request for the DescribeDBClusterSnapshots operation. +// DescribeDBClusterSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusterSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusterSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClusterSnapshotsRequest method. 
+// req, resp := client.DescribeDBClusterSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshotsInput) (req *request.Request, output *DescribeDBClusterSnapshotsOutput) { op := &request.Operation{ Name: opDescribeDBClusterSnapshots, @@ -1090,7 +1927,8 @@ func (c *RDS) DescribeDBClusterSnapshotsRequest(input *DescribeDBClusterSnapshot return } -// Returns information about DB cluster snapshots. This API supports pagination. +// Returns information about DB cluster snapshots. This API action supports +// pagination. // // For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) // in the Amazon RDS User Guide. @@ -1102,7 +1940,28 @@ func (c *RDS) DescribeDBClusterSnapshots(input *DescribeDBClusterSnapshotsInput) const opDescribeDBClusters = "DescribeDBClusters" -// DescribeDBClustersRequest generates a request for the DescribeDBClusters operation. +// DescribeDBClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBClustersRequest method. +// req, resp := client.DescribeDBClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBClustersRequest(input *DescribeDBClustersInput) (req *request.Request, output *DescribeDBClustersOutput) { op := &request.Operation{ Name: opDescribeDBClusters, @@ -1133,7 +1992,28 @@ func (c *RDS) DescribeDBClusters(input *DescribeDBClustersInput) (*DescribeDBClu const opDescribeDBEngineVersions = "DescribeDBEngineVersions" -// DescribeDBEngineVersionsRequest generates a request for the DescribeDBEngineVersions operation. +// DescribeDBEngineVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBEngineVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBEngineVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBEngineVersionsRequest method. 
+// req, resp := client.DescribeDBEngineVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBEngineVersionsRequest(input *DescribeDBEngineVersionsInput) (req *request.Request, output *DescribeDBEngineVersionsOutput) { op := &request.Operation{ Name: opDescribeDBEngineVersions, @@ -1164,6 +2044,23 @@ func (c *RDS) DescribeDBEngineVersions(input *DescribeDBEngineVersionsInput) (*D return out, err } +// DescribeDBEngineVersionsPages iterates over the pages of a DescribeDBEngineVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBEngineVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBEngineVersions operation. +// pageNum := 0 +// err := client.DescribeDBEngineVersionsPages(params, +// func(page *DescribeDBEngineVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBEngineVersionsPages(input *DescribeDBEngineVersionsInput, fn func(p *DescribeDBEngineVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBEngineVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1174,7 +2071,28 @@ func (c *RDS) DescribeDBEngineVersionsPages(input *DescribeDBEngineVersionsInput const opDescribeDBInstances = "DescribeDBInstances" -// DescribeDBInstancesRequest generates a request for the DescribeDBInstances operation. +// DescribeDBInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBInstancesRequest method. +// req, resp := client.DescribeDBInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBInstancesRequest(input *DescribeDBInstancesInput) (req *request.Request, output *DescribeDBInstancesOutput) { op := &request.Operation{ Name: opDescribeDBInstances, @@ -1205,6 +2123,23 @@ func (c *RDS) DescribeDBInstances(input *DescribeDBInstancesInput) (*DescribeDBI return out, err } +// DescribeDBInstancesPages iterates over the pages of a DescribeDBInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBInstances operation. 
+// pageNum := 0 +// err := client.DescribeDBInstancesPages(params, +// func(page *DescribeDBInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBInstancesPages(input *DescribeDBInstancesInput, fn func(p *DescribeDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1215,7 +2150,28 @@ func (c *RDS) DescribeDBInstancesPages(input *DescribeDBInstancesInput, fn func( const opDescribeDBLogFiles = "DescribeDBLogFiles" -// DescribeDBLogFilesRequest generates a request for the DescribeDBLogFiles operation. +// DescribeDBLogFilesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBLogFiles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBLogFiles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBLogFilesRequest method. +// req, resp := client.DescribeDBLogFilesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBLogFilesRequest(input *DescribeDBLogFilesInput) (req *request.Request, output *DescribeDBLogFilesOutput) { op := &request.Operation{ Name: opDescribeDBLogFiles, @@ -1246,6 +2202,23 @@ func (c *RDS) DescribeDBLogFiles(input *DescribeDBLogFilesInput) (*DescribeDBLog return out, err } +// DescribeDBLogFilesPages iterates over the pages of a DescribeDBLogFiles operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBLogFiles method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBLogFiles operation. +// pageNum := 0 +// err := client.DescribeDBLogFilesPages(params, +// func(page *DescribeDBLogFilesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBLogFilesPages(input *DescribeDBLogFilesInput, fn func(p *DescribeDBLogFilesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBLogFilesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1256,7 +2229,28 @@ func (c *RDS) DescribeDBLogFilesPages(input *DescribeDBLogFilesInput, fn func(p const opDescribeDBParameterGroups = "DescribeDBParameterGroups" -// DescribeDBParameterGroupsRequest generates a request for the DescribeDBParameterGroups operation. +// DescribeDBParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBParameterGroupsRequest method. +// req, resp := client.DescribeDBParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBParameterGroupsRequest(input *DescribeDBParameterGroupsInput) (req *request.Request, output *DescribeDBParameterGroupsOutput) { op := &request.Operation{ Name: opDescribeDBParameterGroups, @@ -1289,6 +2283,23 @@ func (c *RDS) DescribeDBParameterGroups(input *DescribeDBParameterGroupsInput) ( return out, err } +// DescribeDBParameterGroupsPages iterates over the pages of a DescribeDBParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBParameterGroups operation. +// pageNum := 0 +// err := client.DescribeDBParameterGroupsPages(params, +// func(page *DescribeDBParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBParameterGroupsPages(input *DescribeDBParameterGroupsInput, fn func(p *DescribeDBParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBParameterGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1299,7 +2310,28 @@ func (c *RDS) DescribeDBParameterGroupsPages(input *DescribeDBParameterGroupsInp const opDescribeDBParameters = "DescribeDBParameters" -// DescribeDBParametersRequest generates a request for the DescribeDBParameters operation. +// DescribeDBParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBParametersRequest method. 
+// req, resp := client.DescribeDBParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBParametersRequest(input *DescribeDBParametersInput) (req *request.Request, output *DescribeDBParametersOutput) { op := &request.Operation{ Name: opDescribeDBParameters, @@ -1330,6 +2362,23 @@ func (c *RDS) DescribeDBParameters(input *DescribeDBParametersInput) (*DescribeD return out, err } +// DescribeDBParametersPages iterates over the pages of a DescribeDBParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBParameters operation. +// pageNum := 0 +// err := client.DescribeDBParametersPages(params, +// func(page *DescribeDBParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBParametersPages(input *DescribeDBParametersInput, fn func(p *DescribeDBParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1340,7 +2389,28 @@ func (c *RDS) DescribeDBParametersPages(input *DescribeDBParametersInput, fn fun const opDescribeDBSecurityGroups = "DescribeDBSecurityGroups" -// DescribeDBSecurityGroupsRequest generates a request for the DescribeDBSecurityGroups operation. +// DescribeDBSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSecurityGroupsRequest method. +// req, resp := client.DescribeDBSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBSecurityGroupsRequest(input *DescribeDBSecurityGroupsInput) (req *request.Request, output *DescribeDBSecurityGroupsOutput) { op := &request.Operation{ Name: opDescribeDBSecurityGroups, @@ -1373,6 +2443,23 @@ func (c *RDS) DescribeDBSecurityGroups(input *DescribeDBSecurityGroupsInput) (*D return out, err } +// DescribeDBSecurityGroupsPages iterates over the pages of a DescribeDBSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBSecurityGroups operation. 
+// pageNum := 0 +// err := client.DescribeDBSecurityGroupsPages(params, +// func(page *DescribeDBSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBSecurityGroupsPages(input *DescribeDBSecurityGroupsInput, fn func(p *DescribeDBSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBSecurityGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1383,7 +2470,28 @@ func (c *RDS) DescribeDBSecurityGroupsPages(input *DescribeDBSecurityGroupsInput const opDescribeDBSnapshotAttributes = "DescribeDBSnapshotAttributes" -// DescribeDBSnapshotAttributesRequest generates a request for the DescribeDBSnapshotAttributes operation. +// DescribeDBSnapshotAttributesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSnapshotAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSnapshotAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSnapshotAttributesRequest method. +// req, resp := client.DescribeDBSnapshotAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBSnapshotAttributesRequest(input *DescribeDBSnapshotAttributesInput) (req *request.Request, output *DescribeDBSnapshotAttributesOutput) { op := &request.Operation{ Name: opDescribeDBSnapshotAttributes, @@ -1405,14 +2513,14 @@ func (c *RDS) DescribeDBSnapshotAttributesRequest(input *DescribeDBSnapshotAttri // snapshot. // // When sharing snapshots with other AWS accounts, DescribeDBSnapshotAttributes -// returns the restore attribute and a list of the AWS account ids that are -// authorized to copy or restore the manual DB snapshot. If all is included +// returns the restore attribute and a list of IDs for the AWS accounts that +// are authorized to copy or restore the manual DB snapshot. If all is included // in the list of values for the restore attribute, then the manual DB snapshot // is public and can be copied or restored by all AWS accounts. // // To add or remove access for an AWS account to copy or restore a manual DB // snapshot, or to make the manual DB snapshot public or private, use the ModifyDBSnapshotAttribute -// API. +// API action. func (c *RDS) DescribeDBSnapshotAttributes(input *DescribeDBSnapshotAttributesInput) (*DescribeDBSnapshotAttributesOutput, error) { req, out := c.DescribeDBSnapshotAttributesRequest(input) err := req.Send() @@ -1421,7 +2529,28 @@ func (c *RDS) DescribeDBSnapshotAttributes(input *DescribeDBSnapshotAttributesIn const opDescribeDBSnapshots = "DescribeDBSnapshots" -// DescribeDBSnapshotsRequest generates a request for the DescribeDBSnapshots operation. +// DescribeDBSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSnapshots operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSnapshotsRequest method. +// req, resp := client.DescribeDBSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBSnapshotsRequest(input *DescribeDBSnapshotsInput) (req *request.Request, output *DescribeDBSnapshotsOutput) { op := &request.Operation{ Name: opDescribeDBSnapshots, @@ -1445,13 +2574,30 @@ func (c *RDS) DescribeDBSnapshotsRequest(input *DescribeDBSnapshotsInput) (req * return } -// Returns information about DB snapshots. This API supports pagination. +// Returns information about DB snapshots. This API action supports pagination. func (c *RDS) DescribeDBSnapshots(input *DescribeDBSnapshotsInput) (*DescribeDBSnapshotsOutput, error) { req, out := c.DescribeDBSnapshotsRequest(input) err := req.Send() return out, err } +// DescribeDBSnapshotsPages iterates over the pages of a DescribeDBSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBSnapshots operation. +// pageNum := 0 +// err := client.DescribeDBSnapshotsPages(params, +// func(page *DescribeDBSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBSnapshotsPages(input *DescribeDBSnapshotsInput, fn func(p *DescribeDBSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBSnapshotsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1462,7 +2608,28 @@ func (c *RDS) DescribeDBSnapshotsPages(input *DescribeDBSnapshotsInput, fn func( const opDescribeDBSubnetGroups = "DescribeDBSubnetGroups" -// DescribeDBSubnetGroupsRequest generates a request for the DescribeDBSubnetGroups operation. +// DescribeDBSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDBSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDBSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDBSubnetGroupsRequest method. 
+// req, resp := client.DescribeDBSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeDBSubnetGroupsRequest(input *DescribeDBSubnetGroupsInput) (req *request.Request, output *DescribeDBSubnetGroupsOutput) { op := &request.Operation{ Name: opDescribeDBSubnetGroups, @@ -1496,6 +2663,23 @@ func (c *RDS) DescribeDBSubnetGroups(input *DescribeDBSubnetGroupsInput) (*Descr return out, err } +// DescribeDBSubnetGroupsPages iterates over the pages of a DescribeDBSubnetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDBSubnetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeDBSubnetGroups operation. +// pageNum := 0 +// err := client.DescribeDBSubnetGroupsPages(params, +// func(page *DescribeDBSubnetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeDBSubnetGroupsPages(input *DescribeDBSubnetGroupsInput, fn func(p *DescribeDBSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDBSubnetGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1506,7 +2690,28 @@ func (c *RDS) DescribeDBSubnetGroupsPages(input *DescribeDBSubnetGroupsInput, fn const opDescribeEngineDefaultClusterParameters = "DescribeEngineDefaultClusterParameters" -// DescribeEngineDefaultClusterParametersRequest generates a request for the DescribeEngineDefaultClusterParameters operation. +// DescribeEngineDefaultClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEngineDefaultClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEngineDefaultClusterParametersRequest method. +// req, resp := client.DescribeEngineDefaultClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeEngineDefaultClusterParametersRequest(input *DescribeEngineDefaultClusterParametersInput) (req *request.Request, output *DescribeEngineDefaultClusterParametersOutput) { op := &request.Operation{ Name: opDescribeEngineDefaultClusterParameters, @@ -1537,7 +2742,28 @@ func (c *RDS) DescribeEngineDefaultClusterParameters(input *DescribeEngineDefaul const opDescribeEngineDefaultParameters = "DescribeEngineDefaultParameters" -// DescribeEngineDefaultParametersRequest generates a request for the DescribeEngineDefaultParameters operation. 
+// DescribeEngineDefaultParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEngineDefaultParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEngineDefaultParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEngineDefaultParametersRequest method. +// req, resp := client.DescribeEngineDefaultParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeEngineDefaultParametersRequest(input *DescribeEngineDefaultParametersInput) (req *request.Request, output *DescribeEngineDefaultParametersOutput) { op := &request.Operation{ Name: opDescribeEngineDefaultParameters, @@ -1569,6 +2795,23 @@ func (c *RDS) DescribeEngineDefaultParameters(input *DescribeEngineDefaultParame return out, err } +// DescribeEngineDefaultParametersPages iterates over the pages of a DescribeEngineDefaultParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEngineDefaultParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEngineDefaultParameters operation. +// pageNum := 0 +// err := client.DescribeEngineDefaultParametersPages(params, +// func(page *DescribeEngineDefaultParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultParametersInput, fn func(p *DescribeEngineDefaultParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEngineDefaultParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1579,7 +2822,28 @@ func (c *RDS) DescribeEngineDefaultParametersPages(input *DescribeEngineDefaultP const opDescribeEventCategories = "DescribeEventCategories" -// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation. +// DescribeEventCategoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventCategories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventCategories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeEventCategoriesRequest method. +// req, resp := client.DescribeEventCategoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { op := &request.Operation{ Name: opDescribeEventCategories, @@ -1609,7 +2873,28 @@ func (c *RDS) DescribeEventCategories(input *DescribeEventCategoriesInput) (*Des const opDescribeEventSubscriptions = "DescribeEventSubscriptions" -// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation. +// DescribeEventSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventSubscriptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventSubscriptionsRequest method. +// req, resp := client.DescribeEventSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { op := &request.Operation{ Name: opDescribeEventSubscriptions, @@ -1644,6 +2929,23 @@ func (c *RDS) DescribeEventSubscriptions(input *DescribeEventSubscriptionsInput) return out, err } +// DescribeEventSubscriptionsPages iterates over the pages of a DescribeEventSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEventSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. +// pageNum := 0 +// err := client.DescribeEventSubscriptionsPages(params, +// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventSubscriptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1654,7 +2956,28 @@ func (c *RDS) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsI const opDescribeEvents = "DescribeEvents" -// DescribeEventsRequest generates a request for the DescribeEvents operation. +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { op := &request.Operation{ Name: opDescribeEvents, @@ -1689,6 +3012,23 @@ func (c *RDS) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOutput, return out, err } +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. +// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1699,7 +3039,28 @@ func (c *RDS) DescribeEventsPages(input *DescribeEventsInput, fn func(p *Describ const opDescribeOptionGroupOptions = "DescribeOptionGroupOptions" -// DescribeOptionGroupOptionsRequest generates a request for the DescribeOptionGroupOptions operation. +// DescribeOptionGroupOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOptionGroupOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOptionGroupOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOptionGroupOptionsRequest method. 
+// req, resp := client.DescribeOptionGroupOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeOptionGroupOptionsRequest(input *DescribeOptionGroupOptionsInput) (req *request.Request, output *DescribeOptionGroupOptionsOutput) { op := &request.Operation{ Name: opDescribeOptionGroupOptions, @@ -1730,6 +3091,23 @@ func (c *RDS) DescribeOptionGroupOptions(input *DescribeOptionGroupOptionsInput) return out, err } +// DescribeOptionGroupOptionsPages iterates over the pages of a DescribeOptionGroupOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOptionGroupOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOptionGroupOptions operation. +// pageNum := 0 +// err := client.DescribeOptionGroupOptionsPages(params, +// func(page *DescribeOptionGroupOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeOptionGroupOptionsPages(input *DescribeOptionGroupOptionsInput, fn func(p *DescribeOptionGroupOptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeOptionGroupOptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1740,7 +3118,28 @@ func (c *RDS) DescribeOptionGroupOptionsPages(input *DescribeOptionGroupOptionsI const opDescribeOptionGroups = "DescribeOptionGroups" -// DescribeOptionGroupsRequest generates a request for the DescribeOptionGroups operation. +// DescribeOptionGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOptionGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOptionGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOptionGroupsRequest method. +// req, resp := client.DescribeOptionGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeOptionGroupsRequest(input *DescribeOptionGroupsInput) (req *request.Request, output *DescribeOptionGroupsOutput) { op := &request.Operation{ Name: opDescribeOptionGroups, @@ -1771,6 +3170,23 @@ func (c *RDS) DescribeOptionGroups(input *DescribeOptionGroupsInput) (*DescribeO return out, err } +// DescribeOptionGroupsPages iterates over the pages of a DescribeOptionGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOptionGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOptionGroups operation. 
+// pageNum := 0 +// err := client.DescribeOptionGroupsPages(params, +// func(page *DescribeOptionGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeOptionGroupsPages(input *DescribeOptionGroupsInput, fn func(p *DescribeOptionGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeOptionGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1781,7 +3197,28 @@ func (c *RDS) DescribeOptionGroupsPages(input *DescribeOptionGroupsInput, fn fun const opDescribeOrderableDBInstanceOptions = "DescribeOrderableDBInstanceOptions" -// DescribeOrderableDBInstanceOptionsRequest generates a request for the DescribeOrderableDBInstanceOptions operation. +// DescribeOrderableDBInstanceOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrderableDBInstanceOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOrderableDBInstanceOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOrderableDBInstanceOptionsRequest method. +// req, resp := client.DescribeOrderableDBInstanceOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeOrderableDBInstanceOptionsRequest(input *DescribeOrderableDBInstanceOptionsInput) (req *request.Request, output *DescribeOrderableDBInstanceOptionsOutput) { op := &request.Operation{ Name: opDescribeOrderableDBInstanceOptions, @@ -1812,6 +3249,23 @@ func (c *RDS) DescribeOrderableDBInstanceOptions(input *DescribeOrderableDBInsta return out, err } +// DescribeOrderableDBInstanceOptionsPages iterates over the pages of a DescribeOrderableDBInstanceOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOrderableDBInstanceOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOrderableDBInstanceOptions operation. 
+// pageNum := 0 +// err := client.DescribeOrderableDBInstanceOptionsPages(params, +// func(page *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeOrderableDBInstanceOptionsPages(input *DescribeOrderableDBInstanceOptionsInput, fn func(p *DescribeOrderableDBInstanceOptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeOrderableDBInstanceOptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1822,7 +3276,28 @@ func (c *RDS) DescribeOrderableDBInstanceOptionsPages(input *DescribeOrderableDB const opDescribePendingMaintenanceActions = "DescribePendingMaintenanceActions" -// DescribePendingMaintenanceActionsRequest generates a request for the DescribePendingMaintenanceActions operation. +// DescribePendingMaintenanceActionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribePendingMaintenanceActions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribePendingMaintenanceActions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribePendingMaintenanceActionsRequest method. +// req, resp := client.DescribePendingMaintenanceActionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribePendingMaintenanceActionsRequest(input *DescribePendingMaintenanceActionsInput) (req *request.Request, output *DescribePendingMaintenanceActionsOutput) { op := &request.Operation{ Name: opDescribePendingMaintenanceActions, @@ -1850,7 +3325,28 @@ func (c *RDS) DescribePendingMaintenanceActions(input *DescribePendingMaintenanc const opDescribeReservedDBInstances = "DescribeReservedDBInstances" -// DescribeReservedDBInstancesRequest generates a request for the DescribeReservedDBInstances operation. +// DescribeReservedDBInstancesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedDBInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedDBInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedDBInstancesRequest method. 
+// req, resp := client.DescribeReservedDBInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeReservedDBInstancesRequest(input *DescribeReservedDBInstancesInput) (req *request.Request, output *DescribeReservedDBInstancesOutput) { op := &request.Operation{ Name: opDescribeReservedDBInstances, @@ -1882,6 +3378,23 @@ func (c *RDS) DescribeReservedDBInstances(input *DescribeReservedDBInstancesInpu return out, err } +// DescribeReservedDBInstancesPages iterates over the pages of a DescribeReservedDBInstances operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedDBInstances method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedDBInstances operation. +// pageNum := 0 +// err := client.DescribeReservedDBInstancesPages(params, +// func(page *DescribeReservedDBInstancesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeReservedDBInstancesPages(input *DescribeReservedDBInstancesInput, fn func(p *DescribeReservedDBInstancesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedDBInstancesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1892,7 +3405,28 @@ func (c *RDS) DescribeReservedDBInstancesPages(input *DescribeReservedDBInstance const opDescribeReservedDBInstancesOfferings = "DescribeReservedDBInstancesOfferings" -// DescribeReservedDBInstancesOfferingsRequest generates a request for the DescribeReservedDBInstancesOfferings operation. +// DescribeReservedDBInstancesOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedDBInstancesOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedDBInstancesOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedDBInstancesOfferingsRequest method. +// req, resp := client.DescribeReservedDBInstancesOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DescribeReservedDBInstancesOfferingsRequest(input *DescribeReservedDBInstancesOfferingsInput) (req *request.Request, output *DescribeReservedDBInstancesOfferingsOutput) { op := &request.Operation{ Name: opDescribeReservedDBInstancesOfferings, @@ -1923,6 +3457,23 @@ func (c *RDS) DescribeReservedDBInstancesOfferings(input *DescribeReservedDBInst return out, err } +// DescribeReservedDBInstancesOfferingsPages iterates over the pages of a DescribeReservedDBInstancesOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeReservedDBInstancesOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedDBInstancesOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedDBInstancesOfferingsPages(params, +// func(page *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DescribeReservedDBInstancesOfferingsPages(input *DescribeReservedDBInstancesOfferingsInput, fn func(p *DescribeReservedDBInstancesOfferingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedDBInstancesOfferingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1933,7 +3484,28 @@ func (c *RDS) DescribeReservedDBInstancesOfferingsPages(input *DescribeReservedD const opDownloadDBLogFilePortion = "DownloadDBLogFilePortion" -// DownloadDBLogFilePortionRequest generates a request for the DownloadDBLogFilePortion operation. +// DownloadDBLogFilePortionRequest generates a "aws/request.Request" representing the +// client's request for the DownloadDBLogFilePortion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DownloadDBLogFilePortion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DownloadDBLogFilePortionRequest method. +// req, resp := client.DownloadDBLogFilePortionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) DownloadDBLogFilePortionRequest(input *DownloadDBLogFilePortionInput) (req *request.Request, output *DownloadDBLogFilePortionOutput) { op := &request.Operation{ Name: opDownloadDBLogFilePortion, @@ -1964,6 +3536,23 @@ func (c *RDS) DownloadDBLogFilePortion(input *DownloadDBLogFilePortionInput) (*D return out, err } +// DownloadDBLogFilePortionPages iterates over the pages of a DownloadDBLogFilePortion operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DownloadDBLogFilePortion method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DownloadDBLogFilePortion operation. 
+// pageNum := 0 +// err := client.DownloadDBLogFilePortionPages(params, +// func(page *DownloadDBLogFilePortionOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *RDS) DownloadDBLogFilePortionPages(input *DownloadDBLogFilePortionInput, fn func(p *DownloadDBLogFilePortionOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DownloadDBLogFilePortionRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1974,7 +3563,28 @@ func (c *RDS) DownloadDBLogFilePortionPages(input *DownloadDBLogFilePortionInput const opFailoverDBCluster = "FailoverDBCluster" -// FailoverDBClusterRequest generates a request for the FailoverDBCluster operation. +// FailoverDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the FailoverDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the FailoverDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the FailoverDBClusterRequest method. +// req, resp := client.FailoverDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) FailoverDBClusterRequest(input *FailoverDBClusterInput) (req *request.Request, output *FailoverDBClusterOutput) { op := &request.Operation{ Name: opFailoverDBCluster, @@ -2015,7 +3625,28 @@ func (c *RDS) FailoverDBCluster(input *FailoverDBClusterInput) (*FailoverDBClust const opListTagsForResource = "ListTagsForResource" -// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ Name: opListTagsForResource, @@ -2045,7 +3676,28 @@ func (c *RDS) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsFor const opModifyDBCluster = "ModifyDBCluster" -// ModifyDBClusterRequest generates a request for the ModifyDBCluster operation. 
+// ModifyDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterRequest method. +// req, resp := client.ModifyDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBClusterRequest(input *ModifyDBClusterInput) (req *request.Request, output *ModifyDBClusterOutput) { op := &request.Operation{ Name: opModifyDBCluster, @@ -2076,7 +3728,28 @@ func (c *RDS) ModifyDBCluster(input *ModifyDBClusterInput) (*ModifyDBClusterOutp const opModifyDBClusterParameterGroup = "ModifyDBClusterParameterGroup" -// ModifyDBClusterParameterGroupRequest generates a request for the ModifyDBClusterParameterGroup operation. +// ModifyDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterParameterGroupRequest method. +// req, resp := client.ModifyDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { op := &request.Operation{ Name: opModifyDBClusterParameterGroup, @@ -2101,7 +3774,7 @@ func (c *RDS) ModifyDBClusterParameterGroupRequest(input *ModifyDBClusterParamet // For more information on Amazon Aurora, see Aurora on Amazon RDS (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html) // in the Amazon RDS User Guide. // -// Changes to dynamic parameters are applied immediately. Changes to static +// Changes to dynamic parameters are applied immediately. Changes to static // parameters require a reboot without failover to the DB cluster associated // with the parameter group before the change can take effect. 
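//
// // Illustrative sketch (editorial addition, not part of the generated AWS SDK
// // docs): submitting a static cluster parameter change, which per the note
// // above only takes effect after a reboot without failover. The parameter
// // group name and parameter name/value are hypothetical placeholders; an
// // *rds.RDS client named "client" and the "aws" and "rds" packages are assumed
// // to be imported, and ModifyDBClusterParameterGroupInput and rds.Parameter
// // are assumed to expose the fields shown.
// params := &rds.ModifyDBClusterParameterGroupInput{
//     DBClusterParameterGroupName: aws.String("example-aurora-params"), // placeholder group name
//     Parameters: []*rds.Parameter{
//         {
//             ParameterName:  aws.String("example_static_parameter"), // placeholder parameter
//             ParameterValue: aws.String("1"),
//             ApplyMethod:    aws.String("pending-reboot"), // dynamic parameters could use "immediate"
//         },
//     },
// }
// resp, err := client.ModifyDBClusterParameterGroup(params)
// if err == nil { // resp is now filled
//     fmt.Println(resp)
// }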
// @@ -2122,9 +3795,93 @@ func (c *RDS) ModifyDBClusterParameterGroup(input *ModifyDBClusterParameterGroup return out, err } +const opModifyDBClusterSnapshotAttribute = "ModifyDBClusterSnapshotAttribute" + +// ModifyDBClusterSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBClusterSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBClusterSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBClusterSnapshotAttributeRequest method. +// req, resp := client.ModifyDBClusterSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) ModifyDBClusterSnapshotAttributeRequest(input *ModifyDBClusterSnapshotAttributeInput) (req *request.Request, output *ModifyDBClusterSnapshotAttributeOutput) { + op := &request.Operation{ + Name: opModifyDBClusterSnapshotAttribute, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ModifyDBClusterSnapshotAttributeInput{} + } + + req = c.newRequest(op, input, output) + output = &ModifyDBClusterSnapshotAttributeOutput{} + req.Data = output + return +} + +// Adds an attribute and values to, or removes an attribute and values from, +// a manual DB cluster snapshot. +// +// To share a manual DB cluster snapshot with other AWS accounts, specify restore +// as the AttributeName and use the ValuesToAdd parameter to add a list of IDs +// of the AWS accounts that are authorized to restore the manual DB cluster +// snapshot. Use the value all to make the manual DB cluster snapshot public, +// which means that it can be copied or restored by all AWS accounts. Do not +// add the all value for any manual DB cluster snapshots that contain private +// information that you don't want available to all AWS accounts. +// +// To view which AWS accounts have access to copy or restore a manual DB cluster +// snapshot, or whether a manual DB cluster snapshot public or private, use +// the DescribeDBClusterSnapshotAttributes API action. +// +// If a manual DB cluster snapshot is encrypted, it cannot be shared. +func (c *RDS) ModifyDBClusterSnapshotAttribute(input *ModifyDBClusterSnapshotAttributeInput) (*ModifyDBClusterSnapshotAttributeOutput, error) { + req, out := c.ModifyDBClusterSnapshotAttributeRequest(input) + err := req.Send() + return out, err +} + const opModifyDBInstance = "ModifyDBInstance" -// ModifyDBInstanceRequest generates a request for the ModifyDBInstance operation. +// ModifyDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBInstanceRequest method. +// req, resp := client.ModifyDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBInstanceRequest(input *ModifyDBInstanceInput) (req *request.Request, output *ModifyDBInstanceOutput) { op := &request.Operation{ Name: opModifyDBInstance, @@ -2152,7 +3909,28 @@ func (c *RDS) ModifyDBInstance(input *ModifyDBInstanceInput) (*ModifyDBInstanceO const opModifyDBParameterGroup = "ModifyDBParameterGroup" -// ModifyDBParameterGroupRequest generates a request for the ModifyDBParameterGroup operation. +// ModifyDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBParameterGroupRequest method. +// req, resp := client.ModifyDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { op := &request.Operation{ Name: opModifyDBParameterGroup, @@ -2174,7 +3952,7 @@ func (c *RDS) ModifyDBParameterGroupRequest(input *ModifyDBParameterGroupInput) // parameter, submit a list of the following: ParameterName, ParameterValue, // and ApplyMethod. A maximum of 20 parameters can be modified in a single request. // -// Changes to dynamic parameters are applied immediately. Changes to static +// Changes to dynamic parameters are applied immediately. Changes to static // parameters require a reboot without failover to the DB instance associated // with the parameter group before the change can take effect. // @@ -2196,7 +3974,28 @@ func (c *RDS) ModifyDBParameterGroup(input *ModifyDBParameterGroupInput) (*DBPar const opModifyDBSnapshotAttribute = "ModifyDBSnapshotAttribute" -// ModifyDBSnapshotAttributeRequest generates a request for the ModifyDBSnapshotAttribute operation. +// ModifyDBSnapshotAttributeRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBSnapshotAttribute operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBSnapshotAttribute method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyDBSnapshotAttributeRequest method. +// req, resp := client.ModifyDBSnapshotAttributeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeInput) (req *request.Request, output *ModifyDBSnapshotAttributeOutput) { op := &request.Operation{ Name: opModifyDBSnapshotAttribute, @@ -2214,20 +4013,20 @@ func (c *RDS) ModifyDBSnapshotAttributeRequest(input *ModifyDBSnapshotAttributeI return } -// Adds an attribute and values to, or removes an attribute and values from +// Adds an attribute and values to, or removes an attribute and values from, // a manual DB snapshot. // // To share a manual DB snapshot with other AWS accounts, specify restore as -// the AttributeName and use the ValuesToAdd parameter to add a list of the -// AWS account ids that are authorized to restore the manual DB snapshot. Uses -// the value all to make the manual DB snapshot public and can by copied or -// restored by all AWS accounts. Do not add the all value for any manual DB -// snapshots that contain private information that you do not want to be available -// to all AWS accounts. +// the AttributeName and use the ValuesToAdd parameter to add a list of IDs +// of the AWS accounts that are authorized to restore the manual DB snapshot. +// Uses the value all to make the manual DB snapshot public, which means it +// can be copied or restored by all AWS accounts. Do not add the all value for +// any manual DB snapshots that contain private information that you don't want +// available to all AWS accounts. // // To view which AWS accounts have access to copy or restore a manual DB snapshot, // or whether a manual DB snapshot public or private, use the DescribeDBSnapshotAttributes -// API. +// API action. // // If the manual DB snapshot is encrypted, it cannot be shared. func (c *RDS) ModifyDBSnapshotAttribute(input *ModifyDBSnapshotAttributeInput) (*ModifyDBSnapshotAttributeOutput, error) { @@ -2238,7 +4037,28 @@ func (c *RDS) ModifyDBSnapshotAttribute(input *ModifyDBSnapshotAttributeInput) ( const opModifyDBSubnetGroup = "ModifyDBSubnetGroup" -// ModifyDBSubnetGroupRequest generates a request for the ModifyDBSubnetGroup operation. +// ModifyDBSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyDBSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyDBSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the ModifyDBSubnetGroupRequest method. +// req, resp := client.ModifyDBSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyDBSubnetGroupRequest(input *ModifyDBSubnetGroupInput) (req *request.Request, output *ModifyDBSubnetGroupOutput) { op := &request.Operation{ Name: opModifyDBSubnetGroup, @@ -2266,7 +4086,28 @@ func (c *RDS) ModifyDBSubnetGroup(input *ModifyDBSubnetGroupInput) (*ModifyDBSub const opModifyEventSubscription = "ModifyEventSubscription" -// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation. +// ModifyEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyEventSubscriptionRequest method. +// req, resp := client.ModifyEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { op := &request.Operation{ Name: opModifyEventSubscription, @@ -2301,7 +4142,28 @@ func (c *RDS) ModifyEventSubscription(input *ModifyEventSubscriptionInput) (*Mod const opModifyOptionGroup = "ModifyOptionGroup" -// ModifyOptionGroupRequest generates a request for the ModifyOptionGroup operation. +// ModifyOptionGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyOptionGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyOptionGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyOptionGroupRequest method. +// req, resp := client.ModifyOptionGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ModifyOptionGroupRequest(input *ModifyOptionGroupInput) (req *request.Request, output *ModifyOptionGroupOutput) { op := &request.Operation{ Name: opModifyOptionGroup, @@ -2328,7 +4190,28 @@ func (c *RDS) ModifyOptionGroup(input *ModifyOptionGroupInput) (*ModifyOptionGro const opPromoteReadReplica = "PromoteReadReplica" -// PromoteReadReplicaRequest generates a request for the PromoteReadReplica operation. 
+// PromoteReadReplicaRequest generates a "aws/request.Request" representing the +// client's request for the PromoteReadReplica operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PromoteReadReplica method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PromoteReadReplicaRequest method. +// req, resp := client.PromoteReadReplicaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) PromoteReadReplicaRequest(input *PromoteReadReplicaInput) (req *request.Request, output *PromoteReadReplicaOutput) { op := &request.Operation{ Name: opPromoteReadReplica, @@ -2358,9 +4241,78 @@ func (c *RDS) PromoteReadReplica(input *PromoteReadReplicaInput) (*PromoteReadRe return out, err } +const opPromoteReadReplicaDBCluster = "PromoteReadReplicaDBCluster" + +// PromoteReadReplicaDBClusterRequest generates a "aws/request.Request" representing the +// client's request for the PromoteReadReplicaDBCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PromoteReadReplicaDBCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PromoteReadReplicaDBClusterRequest method. +// req, resp := client.PromoteReadReplicaDBClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) PromoteReadReplicaDBClusterRequest(input *PromoteReadReplicaDBClusterInput) (req *request.Request, output *PromoteReadReplicaDBClusterOutput) { + op := &request.Operation{ + Name: opPromoteReadReplicaDBCluster, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PromoteReadReplicaDBClusterInput{} + } + + req = c.newRequest(op, input, output) + output = &PromoteReadReplicaDBClusterOutput{} + req.Data = output + return +} + +// Promotes a Read Replica DB cluster to a standalone DB cluster. +func (c *RDS) PromoteReadReplicaDBCluster(input *PromoteReadReplicaDBClusterInput) (*PromoteReadReplicaDBClusterOutput, error) { + req, out := c.PromoteReadReplicaDBClusterRequest(input) + err := req.Send() + return out, err +} + const opPurchaseReservedDBInstancesOffering = "PurchaseReservedDBInstancesOffering" -// PurchaseReservedDBInstancesOfferingRequest generates a request for the PurchaseReservedDBInstancesOffering operation. +// PurchaseReservedDBInstancesOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedDBInstancesOffering operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedDBInstancesOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedDBInstancesOfferingRequest method. +// req, resp := client.PurchaseReservedDBInstancesOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) PurchaseReservedDBInstancesOfferingRequest(input *PurchaseReservedDBInstancesOfferingInput) (req *request.Request, output *PurchaseReservedDBInstancesOfferingOutput) { op := &request.Operation{ Name: opPurchaseReservedDBInstancesOffering, @@ -2387,7 +4339,28 @@ func (c *RDS) PurchaseReservedDBInstancesOffering(input *PurchaseReservedDBInsta const opRebootDBInstance = "RebootDBInstance" -// RebootDBInstanceRequest generates a request for the RebootDBInstance operation. +// RebootDBInstanceRequest generates a "aws/request.Request" representing the +// client's request for the RebootDBInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootDBInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootDBInstanceRequest method. +// req, resp := client.RebootDBInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *request.Request, output *RebootDBInstanceOutput) { op := &request.Operation{ Name: opRebootDBInstance, @@ -2413,12 +4386,12 @@ func (c *RDS) RebootDBInstanceRequest(input *RebootDBInstanceInput) (req *reques // will be conducted through a failover. An Amazon RDS event is created when // the reboot is completed. // -// If your DB instance is deployed in multiple Availability Zones, you can +// If your DB instance is deployed in multiple Availability Zones, you can // force a failover from one AZ to the other during the reboot. You might force // a failover to test the availability of your DB instance deployment or to // restore operations to the original AZ after a failover occurs. // -// The time required to reboot is a function of the specific database engine's +// The time required to reboot is a function of the specific database engine's // crash recovery process. To improve the reboot time, we recommend that you // reduce database activities as much as possible during the reboot process // to reduce rollback activity for in-transit transactions. 
@@ -2430,7 +4403,28 @@ func (c *RDS) RebootDBInstance(input *RebootDBInstanceInput) (*RebootDBInstanceO const opRemoveSourceIdentifierFromSubscription = "RemoveSourceIdentifierFromSubscription" -// RemoveSourceIdentifierFromSubscriptionRequest generates a request for the RemoveSourceIdentifierFromSubscription operation. +// RemoveSourceIdentifierFromSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the RemoveSourceIdentifierFromSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveSourceIdentifierFromSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveSourceIdentifierFromSubscriptionRequest method. +// req, resp := client.RemoveSourceIdentifierFromSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RemoveSourceIdentifierFromSubscriptionRequest(input *RemoveSourceIdentifierFromSubscriptionInput) (req *request.Request, output *RemoveSourceIdentifierFromSubscriptionOutput) { op := &request.Operation{ Name: opRemoveSourceIdentifierFromSubscription, @@ -2457,7 +4451,28 @@ func (c *RDS) RemoveSourceIdentifierFromSubscription(input *RemoveSourceIdentifi const opRemoveTagsFromResource = "RemoveTagsFromResource" -// RemoveTagsFromResourceRequest generates a request for the RemoveTagsFromResource operation. +// RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the +// client's request for the RemoveTagsFromResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveTagsFromResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveTagsFromResourceRequest method. +// req, resp := client.RemoveTagsFromResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RemoveTagsFromResourceRequest(input *RemoveTagsFromResourceInput) (req *request.Request, output *RemoveTagsFromResourceOutput) { op := &request.Operation{ Name: opRemoveTagsFromResource, @@ -2489,7 +4504,28 @@ func (c *RDS) RemoveTagsFromResource(input *RemoveTagsFromResourceInput) (*Remov const opResetDBClusterParameterGroup = "ResetDBClusterParameterGroup" -// ResetDBClusterParameterGroupRequest generates a request for the ResetDBClusterParameterGroup operation. +// ResetDBClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetDBClusterParameterGroup operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetDBClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetDBClusterParameterGroupRequest method. +// req, resp := client.ResetDBClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ResetDBClusterParameterGroupRequest(input *ResetDBClusterParameterGroupInput) (req *request.Request, output *DBClusterParameterGroupNameMessage) { op := &request.Operation{ Name: opResetDBClusterParameterGroup, @@ -2528,7 +4564,28 @@ func (c *RDS) ResetDBClusterParameterGroup(input *ResetDBClusterParameterGroupIn const opResetDBParameterGroup = "ResetDBParameterGroup" -// ResetDBParameterGroupRequest generates a request for the ResetDBParameterGroup operation. +// ResetDBParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetDBParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetDBParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetDBParameterGroupRequest method. +// req, resp := client.ResetDBParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) ResetDBParameterGroupRequest(input *ResetDBParameterGroupInput) (req *request.Request, output *DBParameterGroupNameMessage) { op := &request.Operation{ Name: opResetDBParameterGroup, @@ -2561,7 +4618,28 @@ func (c *RDS) ResetDBParameterGroup(input *ResetDBParameterGroupInput) (*DBParam const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" -// RestoreDBClusterFromSnapshotRequest generates a request for the RestoreDBClusterFromSnapshot operation. +// RestoreDBClusterFromSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBClusterFromSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBClusterFromSnapshot method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBClusterFromSnapshotRequest method. +// req, resp := client.RestoreDBClusterFromSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RestoreDBClusterFromSnapshotRequest(input *RestoreDBClusterFromSnapshotInput) (req *request.Request, output *RestoreDBClusterFromSnapshotOutput) { op := &request.Operation{ Name: opRestoreDBClusterFromSnapshot, @@ -2594,7 +4672,28 @@ func (c *RDS) RestoreDBClusterFromSnapshot(input *RestoreDBClusterFromSnapshotIn const opRestoreDBClusterToPointInTime = "RestoreDBClusterToPointInTime" -// RestoreDBClusterToPointInTimeRequest generates a request for the RestoreDBClusterToPointInTime operation. +// RestoreDBClusterToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBClusterToPointInTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBClusterToPointInTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBClusterToPointInTimeRequest method. +// req, resp := client.RestoreDBClusterToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RestoreDBClusterToPointInTimeRequest(input *RestoreDBClusterToPointInTimeInput) (req *request.Request, output *RestoreDBClusterToPointInTimeOutput) { op := &request.Operation{ Name: opRestoreDBClusterToPointInTime, @@ -2628,7 +4727,28 @@ func (c *RDS) RestoreDBClusterToPointInTime(input *RestoreDBClusterToPointInTime const opRestoreDBInstanceFromDBSnapshot = "RestoreDBInstanceFromDBSnapshot" -// RestoreDBInstanceFromDBSnapshotRequest generates a request for the RestoreDBInstanceFromDBSnapshot operation. +// RestoreDBInstanceFromDBSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBInstanceFromDBSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBInstanceFromDBSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBInstanceFromDBSnapshotRequest method. 
+// req, resp := client.RestoreDBInstanceFromDBSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RestoreDBInstanceFromDBSnapshotRequest(input *RestoreDBInstanceFromDBSnapshotInput) (req *request.Request, output *RestoreDBInstanceFromDBSnapshotOutput) { op := &request.Operation{ Name: opRestoreDBInstanceFromDBSnapshot, @@ -2673,7 +4793,28 @@ func (c *RDS) RestoreDBInstanceFromDBSnapshot(input *RestoreDBInstanceFromDBSnap const opRestoreDBInstanceToPointInTime = "RestoreDBInstanceToPointInTime" -// RestoreDBInstanceToPointInTimeRequest generates a request for the RestoreDBInstanceToPointInTime operation. +// RestoreDBInstanceToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreDBInstanceToPointInTime operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBInstanceToPointInTime method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBInstanceToPointInTimeRequest method. +// req, resp := client.RestoreDBInstanceToPointInTimeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPointInTimeInput) (req *request.Request, output *RestoreDBInstanceToPointInTimeOutput) { op := &request.Operation{ Name: opRestoreDBInstanceToPointInTime, @@ -2696,7 +4837,7 @@ func (c *RDS) RestoreDBInstanceToPointInTimeRequest(input *RestoreDBInstanceToPo // property. You can restore to a point up to the number of days specified by // the BackupRetentionPeriod property. // -// The target database is created with most of the original configuration, +// The target database is created with most of the original configuration, // but in a system-selected availability zone, with the default security group, // the default subnet group, and the default DB parameter group. By default, // the new DB instance is created as a single-AZ deployment except when the @@ -2711,7 +4852,28 @@ func (c *RDS) RestoreDBInstanceToPointInTime(input *RestoreDBInstanceToPointInTi const opRevokeDBSecurityGroupIngress = "RevokeDBSecurityGroupIngress" -// RevokeDBSecurityGroupIngressRequest generates a request for the RevokeDBSecurityGroupIngress operation. +// RevokeDBSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeDBSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeDBSecurityGroupIngress method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeDBSecurityGroupIngressRequest method. +// req, resp := client.RevokeDBSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *RDS) RevokeDBSecurityGroupIngressRequest(input *RevokeDBSecurityGroupIngressInput) (req *request.Request, output *RevokeDBSecurityGroupIngressOutput) { op := &request.Operation{ Name: opRevokeDBSecurityGroupIngress, @@ -2773,11 +4935,16 @@ type AddSourceIdentifierToSubscriptionInput struct { // // Constraints: // - // If the source type is a DB instance, then a DBInstanceIdentifier must be - // supplied. If the source type is a DB security group, a DBSecurityGroupName - // must be supplied. If the source type is a DB parameter group, a DBParameterGroupName - // must be supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier - // must be supplied. + // If the source type is a DB instance, then a DBInstanceIdentifier must + // be supplied. + // + // If the source type is a DB security group, a DBSecurityGroupName must + // be supplied. + // + // If the source type is a DB parameter group, a DBParameterGroupName must + // be supplied. + // + // If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied. SourceIdentifier *string `type:"string" required:"true"` // The name of the RDS event notification subscription you want to add a source @@ -2894,9 +5061,12 @@ type ApplyPendingMaintenanceActionInput struct { // // Valid values: // - // immediate - Apply the maintenance action immediately. next-maintenance - // - Apply the maintenance action during the next maintenance window for the - // resource. undo-opt-in - Cancel any existing next-maintenance opt-in requests. + // immediate - Apply the maintenance action immediately. + // + // next-maintenance - Apply the maintenance action during the next maintenance + // window for the resource. + // + // undo-opt-in - Cancel any existing next-maintenance opt-in requests. OptInType *string `type:"string" required:"true"` // The RDS Amazon Resource Name (ARN) of the resource that the pending maintenance @@ -3006,9 +5176,16 @@ type AuthorizeDBSecurityGroupIngressOutput struct { // Contains the result of a successful invocation of the following actions: // - // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup - // RevokeDBSecurityGroupIngress This data type is used as a response element - // in the DescribeDBSecurityGroups action. + // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. DBSecurityGroup *DBSecurityGroup `type:"structure"` } @@ -3024,7 +5201,9 @@ func (s AuthorizeDBSecurityGroupIngressOutput) GoString() string { // Contains Availability Zone information. 
// -// This data type is used as an element in the following data type: OrderableDBInstanceOption +// This data type is used as an element in the following data type: +// +// OrderableDBInstanceOption type AvailabilityZone struct { _ struct{} `type:"structure"` @@ -3093,6 +5272,98 @@ func (s CharacterSet) GoString() string { return s.String() } +type CopyDBClusterParameterGroupInput struct { + _ struct{} `type:"structure"` + + // The identifier or Amazon Resource Name (ARN) for the source DB cluster parameter + // group. For information about creating an ARN, see Constructing an RDS Amazon + // Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // + // Constraints: + // + // Must specify a valid DB cluster parameter group. + // + // If the source DB cluster parameter group is in the same region as the + // copy, specify a valid DB parameter group identifier, for example my-db-cluster-param-group, + // or a valid ARN. + // + // If the source DB parameter group is in a different region than the copy, + // specify a valid DB cluster parameter group ARN, for example arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1. + SourceDBClusterParameterGroupIdentifier *string `type:"string" required:"true"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A description for the copied DB cluster parameter group. + TargetDBClusterParameterGroupDescription *string `type:"string" required:"true"` + + // The identifier for the copied DB cluster parameter group. + // + // Constraints: + // + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-cluster-param-group1 + TargetDBClusterParameterGroupIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s CopyDBClusterParameterGroupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterParameterGroupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyDBClusterParameterGroupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyDBClusterParameterGroupInput"} + if s.SourceDBClusterParameterGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("SourceDBClusterParameterGroupIdentifier")) + } + if s.TargetDBClusterParameterGroupDescription == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBClusterParameterGroupDescription")) + } + if s.TargetDBClusterParameterGroupIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("TargetDBClusterParameterGroupIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type CopyDBClusterParameterGroupOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the CreateDBClusterParameterGroup + // or CopyDBClusterParameterGroup action. + // + // This data type is used as a request parameter in the DeleteDBClusterParameterGroup + // action, and as a response element in the DescribeDBClusterParameterGroups + // action. 
+ DBClusterParameterGroup *DBClusterParameterGroup `type:"structure"` +} + +// String returns the string representation +func (s CopyDBClusterParameterGroupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyDBClusterParameterGroupOutput) GoString() string { + return s.String() +} + type CopyDBClusterSnapshotInput struct { _ struct{} `type:"structure"` @@ -3101,9 +5372,13 @@ type CopyDBClusterSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens. First character - // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. - // Example: my-cluster-snapshot1 + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster-snapshot1 SourceDBClusterSnapshotIdentifier *string `type:"string" required:"true"` // A list of tags. @@ -3114,9 +5389,13 @@ type CopyDBClusterSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens. First character - // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. - // Example: my-cluster-snapshot2 + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster-snapshot2 TargetDBClusterSnapshotIdentifier *string `type:"string" required:"true"` } @@ -3151,8 +5430,12 @@ type CopyDBClusterSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is - // used as a response element in the DescribeDBClusterSnapshots action. + // CreateDBClusterSnapshot + // + // DeleteDBClusterSnapshot + // + // This data type is used as a response element in the DescribeDBClusterSnapshots + // action. DBClusterSnapshot *DBClusterSnapshot `type:"structure"` } @@ -3175,11 +5458,14 @@ type CopyDBParameterGroupInput struct { // // Constraints: // - // Must specify a valid DB parameter group. If the source DB parameter group - // is in the same region as the copy, specify a valid DB parameter group identifier, - // for example my-db-param-group, or a valid ARN. If the source DB parameter - // group is in a different region than the copy, specify a valid DB parameter - // group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters. + // Must specify a valid DB parameter group. + // + // If the source DB parameter group is in the same region as the copy, specify + // a valid DB parameter group identifier, for example my-db-param-group, or + // a valid ARN. + // + // If the source DB parameter group is in a different region than the copy, + // specify a valid DB parameter group ARN, for example arn:aws:rds:us-west-2:123456789012:pg:special-parameters. SourceDBParameterGroupIdentifier *string `type:"string" required:"true"` // A list of tags. 
@@ -3192,9 +5478,15 @@ type CopyDBParameterGroupInput struct { // // Constraints: // - // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric - // characters or hyphens First character must be a letter Cannot end with a - // hyphen or contain two consecutive hyphens Example: my-db-parameter-group + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-db-parameter-group TargetDBParameterGroupIdentifier *string `type:"string" required:"true"` } @@ -3279,12 +5571,16 @@ type CopyDBSnapshotInput struct { // // Constraints: // - // Must specify a valid system snapshot in the "available" state. If the source - // snapshot is in the same region as the copy, specify a valid DB snapshot identifier. - // If the source snapshot is in a different region than the copy, specify a - // valid DB snapshot ARN. For more information, go to Copying a DB Snapshot + // Must specify a valid system snapshot in the "available" state. + // + // If the source snapshot is in the same region as the copy, specify a valid + // DB snapshot identifier. + // + // If the source snapshot is in a different region than the copy, specify + // a valid DB snapshot ARN. For more information, go to Copying a DB Snapshot // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_CopySnapshot.html). - // Example: rds:mydb-2012-04-02-00-01 + // + // Example: rds:mydb-2012-04-02-00-01 // // Example: arn:aws:rds:rr-regn-1:123456789012:snapshot:mysql-instance1-snapshot-20130805 SourceDBSnapshotIdentifier *string `type:"string" required:"true"` @@ -3296,9 +5592,15 @@ type CopyDBSnapshotInput struct { // // Constraints: // - // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric - // characters or hyphens First character must be a letter Cannot end with a - // hyphen or contain two consecutive hyphens Example: my-db-snapshot + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-db-snapshot TargetDBSnapshotIdentifier *string `type:"string" required:"true"` } @@ -3333,8 +5635,12 @@ type CopyDBSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response - // element in the DescribeDBSnapshots action. + // CreateDBSnapshot + // + // DeleteDBSnapshot + // + // This data type is used as a response element in the DescribeDBSnapshots + // action. DBSnapshot *DBSnapshot `type:"structure"` } @@ -3356,10 +5662,14 @@ type CopyOptionGroupInput struct { // // Constraints: // - // Must specify a valid option group. If the source option group is in the - // same region as the copy, specify a valid option group identifier, for example - // my-option-group, or a valid ARN. If the source option group is in a different - // region than the copy, specify a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. + // Must specify a valid option group. + // + // If the source option group is in the same region as the copy, specify + // a valid option group identifier, for example my-option-group, or a valid + // ARN. 
+ // + // If the source option group is in a different region than the copy, specify + // a valid option group ARN, for example arn:aws:rds:us-west-2:123456789012:og:special-options. SourceOptionGroupIdentifier *string `type:"string" required:"true"` // A list of tags. @@ -3372,9 +5682,15 @@ type CopyOptionGroupInput struct { // // Constraints: // - // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric - // characters or hyphens First character must be a letter Cannot end with a - // hyphen or contain two consecutive hyphens Example: my-option-group + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-option-group TargetOptionGroupIdentifier *string `type:"string" required:"true"` } @@ -3438,7 +5754,7 @@ type CreateDBClusterInput struct { // // Constraints: // - // Must be a value from 1 to 35 + // Must be a value from 1 to 35 BackupRetentionPeriod *int64 `type:"integer"` // A value that indicates that the DB cluster should be associated with the @@ -3449,19 +5765,26 @@ type CreateDBClusterInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens. First character - // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. - // Example: my-cluster1 + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1 DBClusterIdentifier *string `type:"string" required:"true"` // The name of the DB cluster parameter group to associate with this DB cluster. // If this argument is omitted, default.aurora5.6 for the specified engine will // be used. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterParameterGroupName *string `type:"string"` // A DB subnet group to associate with this DB cluster. @@ -3506,15 +5829,18 @@ type CreateDBClusterInput struct { // printable ASCII character except "/", """, or "@". // // Constraints: Must contain from 8 to 41 characters. - MasterUserPassword *string `type:"string" required:"true"` + MasterUserPassword *string `type:"string"` // The name of the master user for the client DB cluster. // // Constraints: // - // Must be 1 to 16 alphanumeric characters. First character must be a letter. - // Cannot be a reserved word for the chosen database engine. - MasterUsername *string `type:"string" required:"true"` + // Must be 1 to 16 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. + MasterUsername *string `type:"string"` // A value that indicates that the DB cluster should be associated with the // specified option group. @@ -3538,9 +5864,13 @@ type CreateDBClusterInput struct { // // Constraints: // - // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated - // Time (UTC). Must not conflict with the preferred maintenance window. Must - // be at least 30 minutes. + // Must be in the format hh24:mi-hh24:mi. 
+ // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. PreferredBackupWindow *string `type:"string"` // The weekly time range during which system maintenance can occur, in Universal @@ -3558,6 +5888,10 @@ type CreateDBClusterInput struct { // Constraints: Minimum 30-minute window. PreferredMaintenanceWindow *string `type:"string"` + // The Amazon Resource Name (ARN) of the source DB cluster if this DB cluster + // is created as a Read Replica. + ReplicationSourceIdentifier *string `type:"string"` + // Specifies whether the DB cluster is encrypted. StorageEncrypted *bool `type:"boolean"` @@ -3587,12 +5921,6 @@ func (s *CreateDBClusterInput) Validate() error { if s.Engine == nil { invalidParams.Add(request.NewErrParamRequired("Engine")) } - if s.MasterUserPassword == nil { - invalidParams.Add(request.NewErrParamRequired("MasterUserPassword")) - } - if s.MasterUsername == nil { - invalidParams.Add(request.NewErrParamRequired("MasterUsername")) - } if invalidParams.Len() > 0 { return invalidParams @@ -3605,9 +5933,20 @@ type CreateDBClusterOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -3626,11 +5965,15 @@ type CreateDBClusterParameterGroupInput struct { // The name of the DB cluster parameter group. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens This value is - // stored as a lowercase string. + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // This value is stored as a lowercase string. DBClusterParameterGroupName *string `type:"string" required:"true"` // The DB cluster parameter group family name. A DB cluster parameter group @@ -3679,7 +6022,7 @@ type CreateDBClusterParameterGroupOutput struct { _ struct{} `type:"structure"` // Contains the result of a successful invocation of the CreateDBClusterParameterGroup - // action. + // or CopyDBClusterParameterGroup action. // // This data type is used as a request parameter in the DeleteDBClusterParameterGroup // action, and as a response element in the DescribeDBClusterParameterGroups @@ -3705,9 +6048,13 @@ type CreateDBClusterSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens. First character - // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. - // Example: my-cluster1 + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1 DBClusterIdentifier *string `type:"string" required:"true"` // The identifier of the DB cluster snapshot. 
This parameter is stored as a @@ -3715,9 +6062,13 @@ type CreateDBClusterSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens. First character - // must be a letter. Cannot end with a hyphen or contain two consecutive hyphens. - // Example: my-cluster1-snapshot1 + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1-snapshot1 DBClusterSnapshotIdentifier *string `type:"string" required:"true"` // The tags to be assigned to the DB cluster snapshot. @@ -3755,8 +6106,12 @@ type CreateDBClusterSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is - // used as a response element in the DescribeDBClusterSnapshots action. + // CreateDBClusterSnapshot + // + // DeleteDBClusterSnapshot + // + // This data type is used as a response element in the DescribeDBClusterSnapshots + // action. DBClusterSnapshot *DBClusterSnapshot `type:"structure"` } @@ -3776,28 +6131,28 @@ type CreateDBInstanceInput struct { // The amount of storage (in gigabytes) to be initially allocated for the database // instance. // - // Type: Integer + // Type: Integer // // MySQL // - // Constraints: Must be an integer from 5 to 6144. + // Constraints: Must be an integer from 5 to 6144. // // MariaDB // - // Constraints: Must be an integer from 5 to 6144. + // Constraints: Must be an integer from 5 to 6144. // // PostgreSQL // - // Constraints: Must be an integer from 5 to 6144. + // Constraints: Must be an integer from 5 to 6144. // // Oracle // - // Constraints: Must be an integer from 10 to 6144. + // Constraints: Must be an integer from 10 to 6144. // // SQL Server // - // Constraints: Must be an integer from 200 to 4096 (Standard Edition and - // Enterprise Edition) or from 20 to 4096 (Express Edition and Web Edition) + // Constraints: Must be an integer from 200 to 4096 (Standard Edition and Enterprise + // Edition) or from 20 to 4096 (Express Edition and Web Edition) AllocatedStorage *int64 `type:"integer"` // Indicates that minor engine upgrades will be applied automatically to the @@ -3810,7 +6165,7 @@ type CreateDBInstanceInput struct { // For information on regions and Availability Zones, see Regions and Availability // Zones (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.RegionsAndAvailabilityZones.html). // - // Default: A random, system-chosen Availability Zone in the endpoint's region. + // Default: A random, system-chosen Availability Zone in the endpoint's region. // // Example: us-east-1d // @@ -3823,12 +6178,13 @@ type CreateDBInstanceInput struct { // parameter to a positive number enables backups. Setting this parameter to // 0 disables automated backups. // - // Default: 1 + // Default: 1 // // Constraints: // - // Must be a value from 0 to 35 Cannot be set to 0 if the DB instance is a - // source to Read Replicas + // Must be a value from 0 to 35 + // + // Cannot be set to 0 if the DB instance is a source to Read Replicas BackupRetentionPeriod *int64 `type:"integer"` // For supported engines, indicates that the DB instance should be associated @@ -3860,9 +6216,14 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for - // SQL Server). First character must be a letter. 
Cannot end with a hyphen or - // contain two consecutive hyphens. Example: mydbinstance + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + // for SQL Server). + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: mydbinstance DBInstanceIdentifier *string `type:"string" required:"true"` // The meaning of this parameter differs according to the database engine you @@ -3877,16 +6238,22 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved - // by the specified database engine MariaDB + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine + // + // MariaDB // // The name of the database to create when the DB instance is created. If this // parameter is not specified, no database is created in the DB instance. // // Constraints: // - // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved - // by the specified database engine PostgreSQL + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine + // + // PostgreSQL // // The name of the database to create when the DB instance is created. If this // parameter is not specified, the default "postgres" database is created in @@ -3894,17 +6261,24 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must contain 1 to 63 alphanumeric characters Must begin with a letter or - // an underscore. Subsequent characters can be letters, underscores, or digits - // (0-9). Cannot be a word reserved by the specified database engine Oracle + // Must contain 1 to 63 alphanumeric characters // - // The Oracle System ID (SID) of the created DB instance. + // Must begin with a letter or an underscore. Subsequent characters can be + // letters, underscores, or digits (0-9). + // + // Cannot be a word reserved by the specified database engine + // + // Oracle + // + // The Oracle System ID (SID) of the created DB instance. // // Default: ORCL // // Constraints: // - // Cannot be longer than 8 characters SQL Server + // Cannot be longer than 8 characters + // + // SQL Server // // Not applicable. Must be null. // @@ -3916,28 +6290,32 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must contain 1 to 64 alphanumeric characters Cannot be a word reserved - // by the specified database engine + // Must contain 1 to 64 alphanumeric characters + // + // Cannot be a word reserved by the specified database engine DBName *string `type:"string"` // The name of the DB parameter group to associate with this DB instance. If // this argument is omitted, the default DBParameterGroup for the specified // engine will be used. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupName *string `type:"string"` // A list of DB security groups to associate with this DB instance. // - // Default: The default DB security group for the database engine. + // Default: The default DB security group for the database engine. 
DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` // A DB subnet group to associate with this DB instance. // - // If there is no DB subnet group, then it is a non-VPC DB instance. + // If there is no DB subnet group, then it is a non-VPC DB instance. DBSubnetGroupName *string `type:"string"` // Specify the Active Directory Domain to create the instance in. @@ -3952,77 +6330,199 @@ type CreateDBInstanceInput struct { // Valid Values: MySQL | mariadb | oracle-se1 | oracle-se | oracle-ee | sqlserver-ee // | sqlserver-se | sqlserver-ex | sqlserver-web | postgres | aurora // - // Not every database engine is available for every AWS region. + // Not every database engine is available for every AWS region. Engine *string `type:"string" required:"true"` // The version number of the database engine to use. // - // The following are the database engines and major and minor versions that + // The following are the database engines and major and minor versions that // are available with Amazon RDS. Not every database engine is available for // every AWS region. // - // MySQL + // Amazon Aurora // - // Version 5.1 (Only available in the following regions: ap-northeast-1, ap-southeast-1, - // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.1.73a | 5.1.73b - // Version 5.5 (Only available in the following regions: ap-northeast-1, ap-southeast-1, - // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 5.5.40 | 5.5.40a - // Version 5.5 (Available in all regions): 5.5.40b | 5.5.41 | 5.5.42 Version - // 5.6 (Available in all regions): 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 - // | 5.6.23 | 5.6.27 Version 5.7 (Available in all regions): 5.7.10 MariaDB + // Version 5.6 (only available in AWS regions ap-northeast-1, ap-northeast-2, + // ap-south-1, ap-southeast-2, eu-west-1, us-east-1, us-west-2): 5.6.10a // - // Version 10.0 (Available in all regions except AWS GovCloud (US) Region - // (us-gov-west-1)): 10.0.17 Oracle Database Enterprise Edition (oracle-ee) + // MariaDB // - // Version 11.2 (Only available in the following regions: ap-northeast-1, - // ap-southeast-1, ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): - // 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version - // 11.2 (Available in all regions): 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 - // | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version 12.1 (Available in all - // regions): 12.1.0.1.v1 | 12.1.0.1.v2 | 12.1.0.2.v1 Oracle Database Standard - // Edition (oracle-se) + // Version 10.1 (available in all AWS regions except us-gov-west-1): 10.1.14 // - // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 - // | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only - // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 - // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version - // 12.1 (Only available in the following regions: eu-central-1, us-west-1): - // 12.1.0.1.v1 | 12.1.0.1.v2 Oracle Database Standard Edition One (oracle-se1) + // Version 10.0 (available in all AWS regions): 10.0.17 | 10.0.24 // - // Version 11.2 (Only available in the following regions: us-west-1): 11.2.0.2.v3 - // | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 Version 11.2 (Only - // available in the following regions: eu-central-1, us-west-1): 11.2.0.3.v1 - // | 11.2.0.3.v2 | 11.2.0.3.v3 | 11.2.0.4.v1 | 11.2.0.4.v3 | 11.2.0.4.v4 Version - // 12.1 (Only available in the 
following regions: eu-central-1, us-west-1): - // 12.1.0.1.v1 | 12.1.0.1.v2 PostgreSQL + // Microsoft SQL Server Enterprise Edition (sqlserver-ee) // - // Version 9.3 (Only available in the following regions: ap-northeast-1, ap-southeast-1, - // ap-southeast-2, eu-west-1, sa-east-1, us-west-1, us-west-2): 9.3.1 | 9.3.2 - // Version 9.3 (Available in all regions): 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 | - // 9.3.10 Version 9.4 (Available in all regions): 9.4.1 | 9.4.4 | 9.4.5 Microsoft - // SQL Server Enterprise Edition (sqlserver-ee) + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 // - // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 - // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in - // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): - // 11.00.5058.0.v1 Microsoft SQL Server Express Edition (sqlserver-ex) + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 // - // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 - // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in - // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): - // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 - // Microsoft SQL Server Standard Edition (sqlserver-se) + // Microsoft SQL Server Express Edition (sqlserver-ex) // - // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 - // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in - // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): - // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 - // Microsoft SQL Server Web Edition (sqlserver-web) + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 // - // Version 10.50 (Available in all regions): 10.50.2789.0.v1 Version 10.50 - // (Available in all regions): 10.50.6000.34.v1 Version 11.00 (Available in - // all regions): 11.00.2100.60.v1 Version 11.00 (Available in all regions): - // 11.00.5058.0.v1 Version 12.00 (Available in all regions): 12.00.4422.0.v1 + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // Microsoft SQL Server Standard Edition (sqlserver-se) + // + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // Microsoft SQL Server Web Edition (sqlserver-web) + // + // Version 12.00 (available in all AWS regions): 12.00.4422.0.v1 + // + // Version 11.00 (available in all AWS regions): 11.00.2100.60.v1 | 11.00.5058.0.v1 + // | 11.00.6020.0.v1 + // + // Version 10.50 (available in all AWS regions): 10.50.2789.0.v1 | 10.50.6000.34.v1 + // | 10.50.6529.0.v1 + // + // MySQL + // + // Version 5.7 (available in all AWS regions): 5.7.10 | 5.7.11 + // + // Version 5.6 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 5.6.19a | 5.6.19b | 5.6.21 | 5.6.21b | 5.6.22 + // + // Version 5.6 (available in all AWS regions except ap-south-1): 5.6.23 + // + // Version 5.6 (available in all AWS regions): 5.6.27 | 
5.6.29 + // + // Version 5.5 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 5.5.40 | 5.5.40a + // + // Version 5.5 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 5.5.40b | 5.5.41 + // + // Version 5.5 (available in all AWS regions except ap-south-1): 5.5.42 + // + // Version 5.5 (available in all AWS regions): 5.5.46 + // + // Version 5.1 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 5.1.73a | 5.1.73b + // + // Oracle Database Enterprise Edition (oracle-ee) + // + // Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 12.1.0.1.v1 | 12.1.0.1.v2 + // + // Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5 + // + // Version 12.1 (available in all AWS regions): 12.1.0.2.v1 + // + // Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 + // | 12.1.0.2.v3 | 12.1.0.2.v4 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8 + // + // Oracle Database Standard Edition (oracle-se) + // + // Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 12.1.0.1.v1 | 12.1.0.1.v2 + // + // Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8 + // + // Oracle Database Standard Edition One (oracle-se1) + // + // Version 12.1 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 12.1.0.1.v1 | 12.1.0.1.v2 + // + // Version 12.1 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, 
eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 12.1.0.1.v3 | 12.1.0.1.v4 | 12.1.0.1.v5 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 11.2.0.2.v3 | 11.2.0.2.v4 | 11.2.0.2.v5 | 11.2.0.2.v6 | 11.2.0.2.v7 + // + // Version 11.2 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 11.2.0.3.v1 | 11.2.0.3.v2 | 11.2.0.3.v3 + // + // Version 11.2 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 11.2.0.3.v4 + // + // Version 11.2 (available in all AWS regions): 11.2.0.4.v1 | 11.2.0.4.v3 + // | 11.2.0.4.v4 + // + // Version 11.2 (available in all AWS regions except us-gov-west-1): 11.2.0.4.v5 + // | 11.2.0.4.v6 | 11.2.0.4.v7 | 11.2.0.4.v8 + // + // Oracle Database Standard Edition Two (oracle-se2) + // + // Version 12.1 (available in all AWS regions except us-gov-west-1): 12.1.0.2.v2 + // | 12.1.0.2.v3 | 12.1.0.2.v4 + // + // PostgreSQL + // + // Version 9.5 (available in all AWS regions except us-gov-west-1): 9.5.2 + // + // Version 9.4 (available in all AWS regions except ap-south-1): 9.4.1 + // | 9.4.4 + // + // Version 9.4 (available in all AWS regions): 9.4.5 + // + // Version 9.4 (available in all AWS regions except us-gov-west-1): 9.4.7 + // + // Version 9.3 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-west-1, sa-east-1, us-east-1, us-gov-west-1, us-west-1, + // us-west-2): 9.3.1 | 9.3.2 + // + // Version 9.3 (available in all AWS regions except ap-south-1, ap-northeast-2): + // 9.3.10 | 9.3.3 | 9.3.5 | 9.3.6 | 9.3.9 + // + // Version 9.3 (only available in AWS regions ap-northeast-1, ap-southeast-1, + // ap-southeast-2, eu-central-1, eu-west-1, sa-east-1, us-east-1, us-west-1, + // us-west-2): 9.3.12 EngineVersion *string `type:"string"` // The amount of Provisioned IOPS (input/output operations per second) to be @@ -4059,27 +6559,27 @@ type CreateDBInstanceInput struct { // // MySQL // - // Constraints: Must contain from 8 to 41 characters. + // Constraints: Must contain from 8 to 41 characters. // // MariaDB // - // Constraints: Must contain from 8 to 41 characters. + // Constraints: Must contain from 8 to 41 characters. // // Oracle // - // Constraints: Must contain from 8 to 30 characters. + // Constraints: Must contain from 8 to 30 characters. // // SQL Server // - // Constraints: Must contain from 8 to 128 characters. + // Constraints: Must contain from 8 to 128 characters. // // PostgreSQL // - // Constraints: Must contain from 8 to 128 characters. + // Constraints: Must contain from 8 to 128 characters. // // Amazon Aurora // - // Constraints: Must contain from 8 to 41 characters. + // Constraints: Must contain from 8 to 41 characters. MasterUserPassword *string `type:"string"` // The name of master user for the client DB instance. @@ -4088,35 +6588,56 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must be 1 to 16 alphanumeric characters. First character must be a letter. - // Cannot be a reserved word for the chosen database engine. MariaDB + // Must be 1 to 16 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. + // + // MariaDB // // Constraints: // - // Must be 1 to 16 alphanumeric characters. 
Cannot be a reserved word for - // the chosen database engine. Type: String + // Must be 1 to 16 alphanumeric characters. + // + // Cannot be a reserved word for the chosen database engine. + // + // Type: String // // Oracle // // Constraints: // - // Must be 1 to 30 alphanumeric characters. First character must be a letter. - // Cannot be a reserved word for the chosen database engine. SQL Server + // Must be 1 to 30 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. + // + // SQL Server // // Constraints: // - // Must be 1 to 128 alphanumeric characters. First character must be a letter. - // Cannot be a reserved word for the chosen database engine. PostgreSQL + // Must be 1 to 128 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. + // + // PostgreSQL // // Constraints: // - // Must be 1 to 63 alphanumeric characters. First character must be a letter. - // Cannot be a reserved word for the chosen database engine. + // Must be 1 to 63 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. MasterUsername *string `type:"string"` // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collecting Enhanced Monitoring - // metrics, specify 0. The default is 60. + // metrics, specify 0. The default is 0. // // If MonitoringRoleArn is specified, then you must also set MonitoringInterval // to a value other than 0. @@ -4134,17 +6655,15 @@ type CreateDBInstanceInput struct { MonitoringRoleArn *string `type:"string"` // Specifies if the DB instance is a Multi-AZ deployment. You cannot set the - // AvailabilityZone parameter if the MultiAZ parameter is set to true. Do not - // set this value if you want a Multi-AZ deployment for a SQL Server DB instance. - // Multi-AZ for SQL Server is set using the Mirroring option in an option group. + // AvailabilityZone parameter if the MultiAZ parameter is set to true. MultiAZ *bool `type:"boolean"` // Indicates that the DB instance should be associated with the specified option // group. // - // Permanent options, such as the TDE option for Oracle Advanced Security - // TDE, cannot be removed from an option group, and that option group cannot - // be removed from a DB instance once it is associated with a DB instance + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance OptionGroupName *string `type:"string"` // The port number on which the database accepts connections. @@ -4206,9 +6725,13 @@ type CreateDBInstanceInput struct { // // Constraints: // - // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated - // Time (UTC). Must not conflict with the preferred maintenance window. Must - // be at least 30 minutes. + // Must be in the format hh24:mi-hh24:mi. + // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. 
PreferredBackupWindow *string `type:"string"` // The weekly time range during which system maintenance can occur, in Universal @@ -4241,19 +6764,23 @@ type CreateDBInstanceInput struct { // which resolves to a public IP address. A value of false specifies an internal // instance with a DNS name that resolves to a private IP address. // - // Default: The default behavior varies depending on whether a VPC has been + // Default: The default behavior varies depending on whether a VPC has been // requested or not. The following list shows the default behavior in each case. // - // Default VPC: true VPC: false If no DB subnet group has been specified - // as part of the request and the PubliclyAccessible value has not been set, - // the DB instance will be publicly accessible. If a specific DB subnet group - // has been specified as part of the request and the PubliclyAccessible value - // has not been set, the DB instance will be private. + // Default VPC: true + // + // VPC: false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. PubliclyAccessible *bool `type:"boolean"` // Specifies whether the DB instance is encrypted. // - // Default: false + // Default: false StorageEncrypted *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -4277,7 +6804,7 @@ type CreateDBInstanceInput struct { // A list of EC2 VPC security groups to associate with this DB instance. // - // Default: The default EC2 VPC security group for the DB subnet group's VPC. + // Default: The default EC2 VPC security group for the DB subnet group's VPC. VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` } @@ -4315,8 +6842,14 @@ type CreateDBInstanceOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -4341,7 +6874,7 @@ type CreateDBInstanceReadReplicaInput struct { // The Amazon EC2 Availability Zone that the Read Replica will be created in. // - // Default: A random, system-chosen Availability Zone in the endpoint's region. + // Default: A random, system-chosen Availability Zone in the endpoint's region. // // Example: us-east-1d AvailabilityZone *string `type:"string"` @@ -4373,15 +6906,23 @@ type CreateDBInstanceReadReplicaInput struct { // // Constraints: // - // Can only be specified if the source DB instance identifier specifies a - // DB instance in another region. The specified DB subnet group must be in the - // same region in which the operation is running. All Read Replicas in one - // region that are created from the same source DB instance must either: Specify - // DB subnet groups from the same VPC. All these Read Replicas will be created - // in the same VPC.Not specify a DB subnet group. All these Read Replicas will - // be created outside of any VPC. 
Constraints: Must contain no more than 255 - // alphanumeric characters, periods, underscores, spaces, or hyphens. Must not - // be default. + // Can only be specified if the source DB instance identifier specifies a + // DB instance in another region. + // + // The specified DB subnet group must be in the same region in which the + // operation is running. + // + // All Read Replicas in one region that are created from the same source + // DB instance must either:> + // + // Specify DB subnet groups from the same VPC. All these Read Replicas will + // be created in the same VPC. + // + // Not specify a DB subnet group. All these Read Replicas will be created + // outside of any VPC. + // + // Constraints: Must contain no more than 255 alphanumeric characters, + // periods, underscores, spaces, or hyphens. Must not be default. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string"` @@ -4392,7 +6933,7 @@ type CreateDBInstanceReadReplicaInput struct { // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the Read Replica. To disable collecting Enhanced Monitoring - // metrics, specify 0. The default is 60. + // metrics, specify 0. The default is 0. // // If MonitoringRoleArn is specified, then you must also set MonitoringInterval // to a value other than 0. @@ -4425,14 +6966,18 @@ type CreateDBInstanceReadReplicaInput struct { // which resolves to a public IP address. A value of false specifies an internal // instance with a DNS name that resolves to a private IP address. // - // Default: The default behavior varies depending on whether a VPC has been + // Default: The default behavior varies depending on whether a VPC has been // requested or not. The following list shows the default behavior in each case. // - // Default VPC:true VPC:false If no DB subnet group has been specified - // as part of the request and the PubliclyAccessible value has not been set, - // the DB instance will be publicly accessible. If a specific DB subnet group - // has been specified as part of the request and the PubliclyAccessible value - // has not been set, the DB instance will be private. + // Default VPC:true + // + // VPC:false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. PubliclyAccessible *bool `type:"boolean"` // The identifier of the DB instance that will act as the source for the Read @@ -4440,16 +6985,24 @@ type CreateDBInstanceReadReplicaInput struct { // // Constraints: // - // Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB - // instance. Can specify a DB instance that is a MySQL Read Replica only if - // the source is running MySQL 5.6. Can specify a DB instance that is a PostgreSQL - // Read Replica only if the source is running PostgreSQL 9.3.5. The specified - // DB instance must have automatic backups enabled, its backup retention period - // must be greater than 0. If the source DB instance is in the same region as - // the Read Replica, specify a valid DB instance identifier. If the source DB - // instance is in a different region than the Read Replica, specify a valid - // DB instance ARN. 
For more information, go to Constructing a Amazon RDS Amazon - // Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). + // Must be the identifier of an existing MySQL, MariaDB, or PostgreSQL DB + // instance. + // + // Can specify a DB instance that is a MySQL Read Replica only if the source + // is running MySQL 5.6. + // + // Can specify a DB instance that is a PostgreSQL Read Replica only if the + // source is running PostgreSQL 9.3.5. + // + // The specified DB instance must have automatic backups enabled, its backup + // retention period must be greater than 0. + // + // If the source DB instance is in the same region as the Read Replica, specify + // a valid DB instance identifier. + // + // If the source DB instance is in a different region than the Read Replica, + // specify a valid DB instance ARN. For more information, go to Constructing + // a Amazon RDS Amazon Resource Name (ARN) (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html#USER_Tagging.ARN). SourceDBInstanceIdentifier *string `type:"string" required:"true"` // Specifies the storage type to be associated with the Read Replica. @@ -4496,8 +7049,14 @@ type CreateDBInstanceReadReplicaOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -4522,11 +7081,15 @@ type CreateDBParameterGroupInput struct { // The name of the DB parameter group. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens This value is - // stored as a lowercase string. + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // This value is stored as a lowercase string. DBParameterGroupName *string `type:"string" required:"true"` // The description for the DB parameter group. @@ -4596,9 +7159,17 @@ type CreateDBSecurityGroupInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" - // Cannot contain spaces Example: mysecuritygroup + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Must not be "Default" + // + // Cannot contain spaces + // + // Example: mysecuritygroup DBSecurityGroupName *string `type:"string" required:"true"` // A list of tags. @@ -4636,9 +7207,16 @@ type CreateDBSecurityGroupOutput struct { // Contains the result of a successful invocation of the following actions: // - // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup - // RevokeDBSecurityGroupIngress This data type is used as a response element - // in the DescribeDBSecurityGroups action. 
+ // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. DBSecurityGroup *DBSecurityGroup `type:"structure"` } @@ -4659,17 +7237,26 @@ type CreateDBSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // The identifier for the DB snapshot. // // Constraints: // - // Cannot be null, empty, or blank Must contain from 1 to 255 alphanumeric - // characters or hyphens First character must be a letter Cannot end with a - // hyphen or contain two consecutive hyphens Example: my-snapshot-id + // Cannot be null, empty, or blank + // + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id DBSnapshotIdentifier *string `type:"string" required:"true"` // A list of tags. @@ -4707,8 +7294,12 @@ type CreateDBSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response - // element in the DescribeDBSnapshots action. + // CreateDBSnapshot + // + // DeleteDBSnapshot + // + // This data type is used as a response element in the DescribeDBSnapshots + // action. DBSnapshot *DBSnapshot `type:"structure"` } @@ -4730,8 +7321,8 @@ type CreateDBSubnetGroupInput struct { // The name for the DB subnet group. This value is stored as a lowercase string. // - // Constraints: Must contain no more than 255 alphanumeric characters, periods, - // underscores, spaces, or hyphens. Must not be default. + // Constraints: Must contain no more than 255 alphanumeric characters. Cannot + // contain periods, underscores, spaces, or hyphens. Must not be default. // // Example: mySubnetgroup DBSubnetGroupName *string `type:"string" required:"true"` @@ -4777,7 +7368,14 @@ type CreateDBSubnetGroupOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup + // CreateDBSubnetGroup + // + // ModifyDBSubnetGroup + // + // DescribeDBSubnetGroups + // + // DeleteDBSubnetGroup + // // This data type is used as a response element in the DescribeDBSubnetGroups // action. DBSubnetGroup *DBSubnetGroup `type:"structure"` @@ -4819,12 +7417,18 @@ type CreateEventSubscriptionInput struct { // // Constraints: // - // If SourceIds are supplied, SourceType must also be provided. If the source - // type is a DB instance, then a DBInstanceIdentifier must be supplied. If the - // source type is a DB security group, a DBSecurityGroupName must be supplied. - // If the source type is a DB parameter group, a DBParameterGroupName must be - // supplied. If the source type is a DB snapshot, a DBSnapshotIdentifier must + // If SourceIds are supplied, SourceType must also be provided. + // + // If the source type is a DB instance, then a DBInstanceIdentifier must // be supplied. 
+ // + // If the source type is a DB security group, a DBSecurityGroupName must + // be supplied. + // + // If the source type is a DB parameter group, a DBParameterGroupName must + // be supplied. + // + // If the source type is a DB snapshot, a DBSnapshotIdentifier must be supplied. SourceIds []*string `locationNameList:"SourceId" type:"list"` // The type of source that will be generating the events. For example, if you @@ -4832,7 +7436,8 @@ type CreateEventSubscriptionInput struct { // parameter to db-instance. if this value is not specified, all events are // returned. // - // Valid values: db-instance | db-parameter-group | db-security-group | db-snapshot + // Valid values: db-instance | db-cluster | db-parameter-group | db-security-group + // | db-snapshot | db-cluster-snapshot SourceType *string `type:"string"` // The name of the subscription. @@ -4904,11 +7509,15 @@ type CreateOptionGroupInput struct { // Specifies the name of the option group to be created. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters or hyphens First character must - // be a letter Cannot end with a hyphen or contain two consecutive hyphens - // Example: myoptiongroup + // Must be 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: myoptiongroup OptionGroupName *string `type:"string" required:"true"` // A list of tags. @@ -4965,9 +7574,20 @@ func (s CreateOptionGroupOutput) GoString() string { // Contains the result of a successful invocation of the following actions: // -// CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster -// RestoreDBClusterFromSnapshot This data type is used as a response element -// in the DescribeDBClusters action. +// CreateDBCluster +// +// DeleteDBCluster +// +// FailoverDBCluster +// +// ModifyDBCluster +// +// RestoreDBClusterFromSnapshot +// +// RestoreDBClusterToPointInTime +// +// This data type is used as a response element in the DescribeDBClusters +// action. type DBCluster struct { _ struct{} `type:"structure"` @@ -5053,6 +7673,14 @@ type DBCluster struct { // in Universal Coordinated Time (UTC). PreferredMaintenanceWindow *string `type:"string"` + // Contains one or more identifiers of the Read Replicas associated with this + // DB cluster. + ReadReplicaIdentifiers []*string `locationNameList:"ReadReplicaIdentifier" type:"list"` + + // Contains the identifier of the source DB cluster if this DB cluster is a + // Read Replica. + ReplicationSourceIdentifier *string `type:"string"` + // Specifies the current state of this DB cluster. Status *string `type:"string"` @@ -5126,7 +7754,7 @@ func (s DBClusterOptionGroupStatus) GoString() string { } // Contains the result of a successful invocation of the CreateDBClusterParameterGroup -// action. +// or CopyDBClusterParameterGroup action. // // This data type is used as a request parameter in the DeleteDBClusterParameterGroup // action, and as a response element in the DescribeDBClusterParameterGroups @@ -5161,11 +7789,15 @@ type DBClusterParameterGroupNameMessage struct { // The name of the DB cluster parameter group. // - // Constraints: + // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens This value is - // stored as a lowercase string. 
+ // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // This value is stored as a lowercase string. DBClusterParameterGroupName *string `type:"string"` } @@ -5181,8 +7813,12 @@ func (s DBClusterParameterGroupNameMessage) GoString() string { // Contains the result of a successful invocation of the following actions: // -// CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is -// used as a response element in the DescribeDBClusterSnapshots action. +// CreateDBClusterSnapshot +// +// DeleteDBClusterSnapshot +// +// This data type is used as a response element in the DescribeDBClusterSnapshots +// action. type DBClusterSnapshot struct { _ struct{} `type:"structure"` @@ -5254,6 +7890,68 @@ func (s DBClusterSnapshot) GoString() string { return s.String() } +// Contains the name and values of a manual DB cluster snapshot attribute. +// +// Manual DB cluster snapshot attributes are used to authorize other AWS accounts +// to restore a manual DB cluster snapshot. For more information, see the ModifyDBClusterSnapshotAttribute +// API action. +type DBClusterSnapshotAttribute struct { + _ struct{} `type:"structure"` + + // The name of the manual DB cluster snapshot attribute. + // + // The attribute named restore refers to the list of AWS accounts that have + // permission to copy or restore the manual DB cluster snapshot. For more information, + // see the ModifyDBClusterSnapshotAttribute API action. + AttributeName *string `type:"string"` + + // The value(s) for the manual DB cluster snapshot attribute. + // + // If the AttributeName field is set to restore, then this element returns + // a list of IDs of the AWS accounts that are authorized to copy or restore + // the manual DB cluster snapshot. If a value of all is in the list, then the + // manual DB cluster snapshot is public and available for any AWS account to + // copy or restore. + AttributeValues []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s DBClusterSnapshotAttribute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshotAttribute) GoString() string { + return s.String() +} + +// Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes +// API action. +// +// Manual DB cluster snapshot attributes are used to authorize other AWS accounts +// to copy or restore a manual DB cluster snapshot. For more information, see +// the ModifyDBClusterSnapshotAttribute API action. +type DBClusterSnapshotAttributesResult struct { + _ struct{} `type:"structure"` + + // The list of attributes and values for the manual DB cluster snapshot. + DBClusterSnapshotAttributes []*DBClusterSnapshotAttribute `locationNameList:"DBClusterSnapshotAttribute" type:"list"` + + // The identifier of the manual DB cluster snapshot that the attributes apply + // to. + DBClusterSnapshotIdentifier *string `type:"string"` +} + +// String returns the string representation +func (s DBClusterSnapshotAttributesResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DBClusterSnapshotAttributesResult) GoString() string { + return s.String() +} + // This data type is used as a response element in the action DescribeDBEngineVersions. 
type DBEngineVersion struct { _ struct{} `type:"structure"` @@ -5298,8 +7996,14 @@ func (s DBEngineVersion) GoString() string { // Contains the result of a successful invocation of the following actions: // -// CreateDBInstance DeleteDBInstance ModifyDBInstance This data type -// is used as a response element in the DescribeDBInstances action. +// CreateDBInstance +// +// DeleteDBInstance +// +// ModifyDBInstance +// +// This data type is used as a response element in the DescribeDBInstances +// action. type DBInstance struct { _ struct{} `type:"structure"` @@ -5347,7 +8051,7 @@ type DBInstance struct { // // MySQL, MariaDB, SQL Server, PostgreSQL, Amazon Aurora // - // Contains the name of the initial database of this instance that was provided + // Contains the name of the initial database of this instance that was provided // at create time, if one was specified when the DB instance was created. This // same name is returned for the life of the DB instance. // @@ -5355,7 +8059,7 @@ type DBInstance struct { // // Oracle // - // Contains the Oracle System ID (SID) of the created DB instance. Not shown + // Contains the Oracle System ID (SID) of the created DB instance. Not shown // when the returned parameters do not apply to an Oracle DB instance. DBName *string `type:"string"` @@ -5451,14 +8155,18 @@ type DBInstance struct { // which resolves to a public IP address. A value of false specifies an internal // instance with a DNS name that resolves to a private IP address. // - // Default: The default behavior varies depending on whether a VPC has been + // Default: The default behavior varies depending on whether a VPC has been // requested or not. The following list shows the default behavior in each case. // - // Default VPC:true VPC:false If no DB subnet group has been specified - // as part of the request and the PubliclyAccessible value has not been set, - // the DB instance will be publicly accessible. If a specific DB subnet group - // has been specified as part of the request and the PubliclyAccessible value - // has not been set, the DB instance will be private. + // Default VPC:true + // + // VPC:false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. PubliclyAccessible *bool `type:"boolean"` // Contains one or more identifiers of the Read Replicas associated with this @@ -5584,8 +8292,17 @@ func (s DBParameterGroupNameMessage) GoString() string { // // This data type is used as a response element in the following actions: // -// CreateDBInstance CreateDBInstanceReadReplica DeleteDBInstance ModifyDBInstance -// RebootDBInstance RestoreDBInstanceFromDBSnapshot +// CreateDBInstance +// +// CreateDBInstanceReadReplica +// +// DeleteDBInstance +// +// ModifyDBInstance +// +// RebootDBInstance +// +// RestoreDBInstanceFromDBSnapshot type DBParameterGroupStatus struct { _ struct{} `type:"structure"` @@ -5608,9 +8325,16 @@ func (s DBParameterGroupStatus) GoString() string { // Contains the result of a successful invocation of the following actions: // -// DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup -// RevokeDBSecurityGroupIngress This data type is used as a response element -// in the DescribeDBSecurityGroups action. 
+// DescribeDBSecurityGroups +// +// AuthorizeDBSecurityGroupIngress +// +// CreateDBSecurityGroup +// +// RevokeDBSecurityGroupIngress +// +// This data type is used as a response element in the DescribeDBSecurityGroups +// action. type DBSecurityGroup struct { _ struct{} `type:"structure"` @@ -5645,8 +8369,13 @@ func (s DBSecurityGroup) GoString() string { // This data type is used as a response element in the following actions: // -// ModifyDBInstance RebootDBInstance RestoreDBInstanceFromDBSnapshot -// RestoreDBInstanceToPointInTime +// ModifyDBInstance +// +// RebootDBInstance +// +// RestoreDBInstanceFromDBSnapshot +// +// RestoreDBInstanceToPointInTime type DBSecurityGroupMembership struct { _ struct{} `type:"structure"` @@ -5669,8 +8398,12 @@ func (s DBSecurityGroupMembership) GoString() string { // Contains the result of a successful invocation of the following actions: // -// CreateDBSnapshot DeleteDBSnapshot This data type is used as a response -// element in the DescribeDBSnapshots action. +// CreateDBSnapshot +// +// DeleteDBSnapshot +// +// This data type is used as a response element in the DescribeDBSnapshots +// action. type DBSnapshot struct { _ struct{} `type:"structure"` @@ -5771,16 +8504,17 @@ type DBSnapshotAttribute struct { // The name of the manual DB snapshot attribute. // - // An attribute name of restore applies to the list of AWS accounts that have - // permission to copy or restore the manual DB snapshot. + // The attribute named restore refers to the list of AWS accounts that have + // permission to copy or restore the manual DB cluster snapshot. For more information, + // see the ModifyDBSnapshotAttribute API action. AttributeName *string `type:"string"` - // The value(s) for the manual DB snapshot attribute. + // The value or values for the manual DB snapshot attribute. // - // If the AttributeName field is restore, then this field returns a list of - // AWS account ids that are authorized to copy or restore the manual DB snapshot. - // If a value of all is in the list, then the manual DB snapshot is public and - // available for any AWS account to copy or restore. + // If the AttributeName field is set to restore, then this element returns + // a list of IDs of the AWS accounts that are authorized to copy or restore + // the manual DB snapshot. If a value of all is in the list, then the manual + // DB snapshot is public and available for any AWS account to copy or restore. AttributeValues []*string `locationNameList:"AttributeValue" type:"list"` } @@ -5795,11 +8529,11 @@ func (s DBSnapshotAttribute) GoString() string { } // Contains the results of a successful call to the DescribeDBSnapshotAttributes -// API. +// API action. // // Manual DB snapshot attributes are used to authorize other AWS accounts to // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute -// API. +// API action. type DBSnapshotAttributesResult struct { _ struct{} `type:"structure"` @@ -5822,7 +8556,14 @@ func (s DBSnapshotAttributesResult) GoString() string { // Contains the result of a successful invocation of the following actions: // -// CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup +// CreateDBSubnetGroup +// +// ModifyDBSubnetGroup +// +// DescribeDBSubnetGroups +// +// DeleteDBSubnetGroup +// // This data type is used as a response element in the DescribeDBSubnetGroups // action. 
type DBSubnetGroup struct { @@ -5862,26 +8603,36 @@ type DeleteDBClusterInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterIdentifier *string `type:"string" required:"true"` // The DB cluster snapshot identifier of the new DB cluster snapshot created // when SkipFinalSnapshot is set to false. // - // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. Constraints: + // Specifying this parameter and also setting the SkipFinalShapshot parameter + // to true results in an error. // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens FinalDBSnapshotIdentifier *string `type:"string"` // Determines whether a final DB cluster snapshot is created before the DB cluster // is deleted. If true is specified, no DB cluster snapshot is created. If false // is specified, a DB cluster snapshot is created before the DB cluster is deleted. // - // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot - // is false. Default: false + // You must specify a FinalDBSnapshotIdentifier parameter if SkipFinalSnapshot + // is false. + // + // Default: false SkipFinalSnapshot *bool `type:"boolean"` } @@ -5913,9 +8664,20 @@ type DeleteDBClusterOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -5936,9 +8698,11 @@ type DeleteDBClusterParameterGroupInput struct { // // Constraints: // - // Must be the name of an existing DB cluster parameter group. You cannot - // delete a default DB cluster parameter group. Cannot be associated with any - // DB clusters. + // Must be the name of an existing DB cluster parameter group. + // + // You cannot delete a default DB cluster parameter group. + // + // Cannot be associated with any DB clusters. DBClusterParameterGroupName *string `type:"string" required:"true"` } @@ -6017,8 +8781,12 @@ type DeleteDBClusterSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBClusterSnapshot DeleteDBClusterSnapshot This data type is - // used as a response element in the DescribeDBClusterSnapshots action. + // CreateDBClusterSnapshot + // + // DeleteDBClusterSnapshot + // + // This data type is used as a response element in the DescribeDBClusterSnapshots + // action. 
DBClusterSnapshot *DBClusterSnapshot `type:"structure"` } @@ -6040,19 +8808,28 @@ type DeleteDBInstanceInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // The DBSnapshotIdentifier of the new DBSnapshot created when SkipFinalSnapshot // is set to false. // // Specifying this parameter and also setting the SkipFinalShapshot parameter - // to true results in an error. Constraints: + // to true results in an error. // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens Cannot be specified - // when deleting a Read Replica. + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Cannot be specified when deleting a Read Replica. FinalDBSnapshotIdentifier *string `type:"string"` // Determines whether a final DB snapshot is created before the DB instance @@ -6065,8 +8842,10 @@ type DeleteDBInstanceInput struct { // // Specify true when deleting a Read Replica. // - // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot - // is false. Default: false + // The FinalDBSnapshotIdentifier parameter must be specified if SkipFinalSnapshot + // is false. + // + // Default: false SkipFinalSnapshot *bool `type:"boolean"` } @@ -6098,8 +8877,14 @@ type DeleteDBInstanceOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -6120,8 +8905,11 @@ type DeleteDBParameterGroupInput struct { // // Constraints: // - // Must be the name of an existing DB parameter group You cannot delete a - // default DB parameter group Cannot be associated with any DB instances + // Must be the name of an existing DB parameter group + // + // You cannot delete a default DB parameter group + // + // Cannot be associated with any DB instances DBParameterGroupName *string `type:"string" required:"true"` } @@ -6167,11 +8955,19 @@ type DeleteDBSecurityGroupInput struct { // The name of the DB security group to delete. // - // You cannot delete the default DB security group. Constraints: + // You cannot delete the default DB security group. 
// - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens Must not be "Default" - // Cannot contain spaces + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Must not be "Default" + // + // Cannot contain spaces DBSecurityGroupName *string `type:"string" required:"true"` } @@ -6250,8 +9046,12 @@ type DeleteDBSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBSnapshot DeleteDBSnapshot This data type is used as a response - // element in the DescribeDBSnapshots action. + // CreateDBSnapshot + // + // DeleteDBSnapshot + // + // This data type is used as a response element in the DescribeDBSnapshots + // action. DBSnapshot *DBSnapshot `type:"structure"` } @@ -6270,7 +9070,9 @@ type DeleteDBSubnetGroupInput struct { // The name of the database subnet group to delete. // - // You cannot delete the default subnet group. Constraints: + // You cannot delete the default subnet group. + // + // Constraints: // // Constraints: Must contain no more than 255 alphanumeric characters, periods, // underscores, spaces, or hyphens. Must not be default. @@ -6369,7 +9171,7 @@ type DeleteOptionGroupInput struct { // The name of the option group to be deleted. // - // You cannot delete default option groups. + // You cannot delete default option groups. OptionGroupName *string `type:"string" required:"true"` } @@ -6452,8 +9254,11 @@ type DescribeCertificatesInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens CertificateIdentifier *string `type:"string"` // This parameter is not currently supported. @@ -6534,8 +9339,11 @@ type DescribeDBClusterParameterGroupsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterParameterGroupName *string `type:"string"` // This parameter is not currently supported. @@ -6616,8 +9424,11 @@ type DescribeDBClusterParametersInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterParameterGroupName *string `type:"string" required:"true"` // This parameter is not currently supported. @@ -6699,17 +9510,72 @@ func (s DescribeDBClusterParametersOutput) GoString() string { return s.String() } +type DescribeDBClusterSnapshotAttributesInput struct { + _ struct{} `type:"structure"` + + // The identifier for the DB cluster snapshot to describe the attributes for. 
+ DBClusterSnapshotIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeDBClusterSnapshotAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBClusterSnapshotAttributesInput"} + if s.DBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeDBClusterSnapshotAttributesOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes + // API action. + // + // Manual DB cluster snapshot attributes are used to authorize other AWS accounts + // to copy or restore a manual DB cluster snapshot. For more information, see + // the ModifyDBClusterSnapshotAttribute API action. + DBClusterSnapshotAttributesResult *DBClusterSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s DescribeDBClusterSnapshotAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeDBClusterSnapshotAttributesOutput) GoString() string { + return s.String() +} + type DescribeDBClusterSnapshotsInput struct { _ struct{} `type:"structure"` - // A DB cluster identifier to retrieve the list of DB cluster snapshots for. + // The ID of the DB cluster to retrieve the list of DB cluster snapshots for. // This parameter cannot be used in conjunction with the DBClusterSnapshotIdentifier // parameter. This parameter is not case-sensitive. // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterIdentifier *string `type:"string"` // A specific DB cluster snapshot identifier to describe. This parameter cannot @@ -6718,15 +9584,35 @@ type DescribeDBClusterSnapshotsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens If this is the - // identifier of an automated snapshot, the SnapshotType parameter must also - // be specified. + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // If this identifier is for an automated snapshot, the SnapshotType parameter + // must also be specified. DBClusterSnapshotIdentifier *string `type:"string"` // This parameter is not currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` + // Set this value to true to include manual DB cluster snapshots that are public + // and can be copied or restored by any AWS account, otherwise set this value + // to false. The default is false. The default is false. 
+ // + // You can share a manual DB cluster snapshot as public by using the ModifyDBClusterSnapshotAttribute + // API action. + IncludePublic *bool `type:"boolean"` + + // Set this value to true to include shared manual DB cluster snapshots from + // other AWS accounts that this AWS account has been given permission to copy + // or restore, otherwise set this value to false. The default is false. + // + // You can give an AWS account permission to restore a manual DB cluster snapshot + // from another AWS account by the ModifyDBClusterSnapshotAttribute API action. + IncludeShared *bool `type:"boolean"` + // An optional pagination token provided by a previous DescribeDBClusterSnapshots // request. If this parameter is specified, the response includes only records // beyond the marker, up to the value specified by MaxRecords. @@ -6741,9 +9627,30 @@ type DescribeDBClusterSnapshotsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The type of DB cluster snapshots that will be returned. Values can be automated - // or manual. If this parameter is not specified, the returned results will - // include all snapshot types. + // The type of DB cluster snapshots to be returned. You can specify one of the + // following values: + // + // automated - Return all DB cluster snapshots that have been automatically + // taken by Amazon RDS for my AWS account. + // + // manual - Return all DB cluster snapshots that have been taken by my AWS + // account. + // + // shared - Return all manual DB cluster snapshots that have been shared + // to my AWS account. + // + // public - Return all DB cluster snapshots that have been marked as public. + // + // If you don't specify a SnapshotType value, then both automated and manual + // DB cluster snapshots are returned. You can include shared DB cluster snapshots + // with these results by setting the IncludeShared parameter to true. You can + // include public DB cluster snapshots with these results by setting the IncludePublic + // parameter to true. + // + // The IncludeShared and IncludePublic parameters don't apply for SnapshotType + // values of manual or automated. The IncludePublic parameter doesn't apply + // when SnapshotType is set to shared. The IncludeShared parameter doesn't apply + // when SnapshotType is set to public. SnapshotType *string `type:"string"` } @@ -6810,8 +9717,11 @@ type DescribeDBClustersInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterIdentifier *string `type:"string"` // This parameter is not currently supported. 
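The SnapshotType, IncludeShared, and IncludePublic semantics documented above fit together like this: leaving SnapshotType unset returns automated and manual cluster snapshots, and setting IncludeShared to true adds manual snapshots shared from other accounts. A hedged sketch, assuming an *rds.RDS client built elsewhere and a placeholder cluster identifier:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// listClusterSnapshots lists automated, manual, and shared snapshots for one cluster.
func listClusterSnapshots(svc *rds.RDS) error {
	out, err := svc.DescribeDBClusterSnapshots(&rds.DescribeDBClusterSnapshotsInput{
		DBClusterIdentifier: aws.String("my-cluster"), // placeholder
		IncludeShared:       aws.Bool(true),           // also return snapshots shared to this account
	})
	if err != nil {
		return err
	}
	for _, s := range out.DBClusterSnapshots {
		fmt.Printf("%s (%s)\n",
			aws.StringValue(s.DBClusterSnapshotIdentifier),
			aws.StringValue(s.SnapshotType))
	}
	return nil
}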
@@ -6891,8 +9801,11 @@ type DescribeDBEngineVersionsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupFamily *string `type:"string"` // Indicates that only the default version of the specified engine or engine @@ -6992,8 +9905,11 @@ type DescribeDBInstancesInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string"` // This parameter is not currently supported. @@ -7100,8 +10016,11 @@ type DescribeDBLogFilesInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // Filters the available log files for files written since the specified date, @@ -7190,8 +10109,11 @@ type DescribeDBParameterGroupsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupName *string `type:"string"` // This parameter is not currently supported. @@ -7273,8 +10195,11 @@ type DescribeDBParametersInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupName *string `type:"string" required:"true"` // This parameter is not currently supported. @@ -7440,8 +10365,8 @@ func (s DescribeDBSecurityGroupsOutput) GoString() string { type DescribeDBSnapshotAttributesInput struct { _ struct{} `type:"structure"` - // The identifier for the DB snapshot to modify the attributes for. - DBSnapshotIdentifier *string `type:"string"` + // The identifier for the DB snapshot to describe the attributes for. + DBSnapshotIdentifier *string `type:"string" required:"true"` } // String returns the string representation @@ -7454,15 +10379,28 @@ func (s DescribeDBSnapshotAttributesInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DescribeDBSnapshotAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeDBSnapshotAttributesInput"} + if s.DBSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DescribeDBSnapshotAttributesOutput struct { _ struct{} `type:"structure"` // Contains the results of a successful call to the DescribeDBSnapshotAttributes - // API. + // API action. // // Manual DB snapshot attributes are used to authorize other AWS accounts to // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute - // API. + // API action. DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` } @@ -7479,14 +10417,17 @@ func (s DescribeDBSnapshotAttributesOutput) GoString() string { type DescribeDBSnapshotsInput struct { _ struct{} `type:"structure"` - // A DB instance identifier to retrieve the list of DB snapshots for. This parameter - // cannot be used in conjunction with DBSnapshotIdentifier. This parameter is - // not case-sensitive. + // The ID of the DB instance to retrieve the list of DB snapshots for. This + // parameter cannot be used in conjunction with DBSnapshotIdentifier. This parameter + // is not case-sensitive. // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string"` // A specific DB snapshot identifier to describe. This parameter cannot be used @@ -7495,28 +10436,33 @@ type DescribeDBSnapshotsInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters. First character must be a letter. - // Cannot end with a hyphen or contain two consecutive hyphens. If this is the - // identifier of an automated snapshot, the SnapshotType parameter must also - // be specified. + // Must be 1 to 255 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // If this identifier is for an automated snapshot, the SnapshotType parameter + // must also be specified. DBSnapshotIdentifier *string `type:"string"` // This parameter is not currently supported. Filters []*Filter `locationNameList:"Filter" type:"list"` - // True to include manual DB snapshots that are public and can be copied or - // restored by any AWS account; otherwise false. The default is false. + // Set this value to true to include manual DB snapshots that are public and + // can be copied or restored by any AWS account, otherwise set this value to + // false. The default is false. // - // An manual DB snapshot is shared as public by the ModifyDBSnapshotAttribute + // You can share a manual DB snapshot as public by using the ModifyDBSnapshotAttribute // API. IncludePublic *bool `type:"boolean"` - // True to include shared manual DB snapshots from other AWS accounts that this - // AWS account has been given permission to copy or restore; otherwise false. - // The default is false. 
+ // Set this value to true to include shared manual DB snapshots from other AWS + // accounts that this AWS account has been given permission to copy or restore, + // otherwise set this value to false. The default is false. // - // An AWS account is given permission to restore a manual DB snapshot from - // another AWS account by the ModifyDBSnapshotAttribute API. + // You can give an AWS account permission to restore a manual DB snapshot from + // another AWS account by using the ModifyDBSnapshotAttribute API action. IncludeShared *bool `type:"boolean"` // An optional pagination token provided by a previous DescribeDBSnapshots request. @@ -7533,23 +10479,28 @@ type DescribeDBSnapshotsInput struct { // Constraints: Minimum 20, maximum 100. MaxRecords *int64 `type:"integer"` - // The type of snapshots that will be returned. You can specify one of the following + // The type of snapshots to be returned. You can specify one of the following // values: // - // automated - Return all DB snapshots that have been automatically taken - // by Amazon RDS for my AWS account. manual - Return all DB snapshots that have - // been taken by my AWS account. shared - Return all manual DB snapshots that - // have been shared to my AWS account. public - Return all DB snapshots that - // have been marked as public. If you do not specify a SnapshotType, then both - // automated and manual snapshots are returned. You can include shared snapshots - // with these results by setting the IncludeShared parameter to true. You can - // include public snapshots with these results by setting the IncludePublic - // parameter to true. + // automated - Return all DB snapshots that have been automatically taken + // by Amazon RDS for my AWS account. // - // The IncludeShared and IncludePublic parameters do not apply for SnapshotType - // values of manual or automated. The IncludePublic parameter does not apply - // when SnapshotType is set to shared. the IncludeShared parameter does not - // apply when SnapshotType is set to public. + // manual - Return all DB snapshots that have been taken by my AWS account. + // + // shared - Return all manual DB snapshots that have been shared to my AWS + // account. + // + // public - Return all DB snapshots that have been marked as public. + // + // If you don't specify a SnapshotType value, then both automated and manual + // snapshots are returned. You can include shared snapshots with these results + // by setting the IncludeShared parameter to true. You can include public snapshots + // with these results by setting the IncludePublic parameter to true. + // + // The IncludeShared and IncludePublic parameters don't apply for SnapshotType + // values of manual or automated. The IncludePublic parameter doesn't apply + // when SnapshotType is set to shared. The IncludeShared parameter doesn't apply + // when SnapshotType is set to public. SnapshotType *string `type:"string"` } @@ -8014,12 +10965,19 @@ type DescribeEventsInput struct { // // Constraints: // - // If SourceIdentifier is supplied, SourceType must also be provided. If the - // source type is DBInstance, then a DBInstanceIdentifier must be supplied. - // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied. - // If the source type is DBParameterGroup, a DBParameterGroupName must be supplied. - // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied. - // Cannot end with a hyphen or contain two consecutive hyphens. 
+ // If SourceIdentifier is supplied, SourceType must also be provided. + // + // If the source type is DBInstance, then a DBInstanceIdentifier must be + // supplied. + // + // If the source type is DBSecurityGroup, a DBSecurityGroupName must be supplied. + // + // If the source type is DBParameterGroup, a DBParameterGroupName must be + // supplied. + // + // If the source type is DBSnapshot, a DBSnapshotIdentifier must be supplied. + // + // Cannot end with a hyphen or contain two consecutive hyphens. SourceIdentifier *string `type:"string"` // The event source to retrieve events for. If no value is specified, all events @@ -8364,7 +11322,7 @@ type DescribePendingMaintenanceActionsInput struct { // // Supported filters: // - // db-instance-id - Accepts DB instance identifiers and DB instance Amazon + // db-instance-id - Accepts DB instance identifiers and DB instance Amazon // Resource Names (ARNs). The results list will only include pending maintenance // actions for the DB instances identified by these ARNs. Filters []*Filter `locationNameList:"Filter" type:"list"` @@ -8689,8 +11647,11 @@ type DownloadDBLogFilePortionInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // The name of the log file to be downloaded. @@ -8706,21 +11667,23 @@ type DownloadDBLogFilePortionInput struct { // // If the NumberOfLines parameter is specified, then the block of lines returned // can be from the beginning or the end of the log file, depending on the value - // of the Marker parameter. If neither Marker or NumberOfLines are specified, - // the entire log file is returned up to a maximum of 10000 lines, starting - // with the most recent log entries first. + // of the Marker parameter. // - // If NumberOfLines is specified and Marker is not specified, then the most + // If neither Marker or NumberOfLines are specified, the entire log file + // is returned up to a maximum of 10000 lines, starting with the most recent + // log entries first. + // + // If NumberOfLines is specified and Marker is not specified, then the most // recent lines from the end of the log file are returned. // - // If Marker is specified as "0", then the specified number of lines from the - // beginning of the log file are returned. + // If Marker is specified as "0", then the specified number of lines from + // the beginning of the log file are returned. // - // You can download the log file in blocks of lines by specifying the size of - // the block using the NumberOfLines parameter, and by specifying a value of - // "0" for the Marker parameter in your first request. Include the Marker value - // returned in the response as the Marker value for the next request, continuing - // until the AdditionalDataPending response element returns false. + // You can download the log file in blocks of lines by specifying the size + // of the block using the NumberOfLines parameter, and by specifying a value + // of "0" for the Marker parameter in your first request. Include the Marker + // value returned in the response as the Marker value for the next request, + // continuing until the AdditionalDataPending response element returns false. 
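The Marker and NumberOfLines description above amounts to a simple pagination loop: start with Marker "0", request a block of lines, and keep feeding the returned Marker back until AdditionalDataPending is false. A sketch of that loop, with placeholder instance and log file names:

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// downloadLogFile streams a DB log file in blocks of 1000 lines.
func downloadLogFile(svc *rds.RDS) error {
	marker := aws.String("0") // "0" starts from the beginning of the file
	for {
		out, err := svc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
			DBInstanceIdentifier: aws.String("mydbinstance"),          // placeholder
			LogFileName:          aws.String("error/mysql-error.log"), // placeholder
			Marker:               marker,
			NumberOfLines:        aws.Int64(1000),
		})
		if err != nil {
			return err
		}
		fmt.Print(aws.StringValue(out.LogFileData))
		if !aws.BoolValue(out.AdditionalDataPending) {
			break
		}
		marker = out.Marker // continue from where the last block ended
	}
	return nil
}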
NumberOfLines *int64 `type:"integer"` } @@ -8777,7 +11740,11 @@ func (s DownloadDBLogFilePortionOutput) GoString() string { // This data type is used as a response element in the following actions: // -// AuthorizeDBSecurityGroupIngress DescribeDBSecurityGroups RevokeDBSecurityGroupIngress +// AuthorizeDBSecurityGroupIngress +// +// DescribeDBSecurityGroups +// +// RevokeDBSecurityGroupIngress type EC2SecurityGroup struct { _ struct{} `type:"structure"` @@ -8808,7 +11775,11 @@ func (s EC2SecurityGroup) GoString() string { // This data type is used as a response element in the following actions: // -// CreateDBInstance DescribeDBInstances DeleteDBInstance +// CreateDBInstance +// +// DescribeDBInstances +// +// DeleteDBInstance type Endpoint struct { _ struct{} `type:"structure"` @@ -8972,9 +11943,18 @@ type FailoverDBClusterInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterIdentifier *string `type:"string"` + + // The name of the instance to promote to the primary instance. + // + // You must specify the instance identifier for an Aurora Replica in the DB + // cluster. For example, mydbcluster-replica1. + TargetDBInstanceIdentifier *string `type:"string"` } // String returns the string representation @@ -8992,9 +11972,20 @@ type FailoverDBClusterOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -9008,6 +11999,7 @@ func (s FailoverDBClusterOutput) GoString() string { return s.String() } +// This type is not currently supported. type Filter struct { _ struct{} `type:"structure"` @@ -9155,7 +12147,7 @@ type ModifyDBClusterInput struct { // // Constraints: // - // Must be a value from 1 to 35 + // Must be a value from 1 to 35 BackupRetentionPeriod *int64 `type:"integer"` // The DB cluster identifier for the cluster being modified. This parameter @@ -9163,9 +12155,13 @@ type ModifyDBClusterInput struct { // // Constraints: // - // Must be the identifier for an existing DB cluster. Must contain from 1 - // to 63 alphanumeric characters or hyphens. First character must be a letter. - // Cannot end with a hyphen or contain two consecutive hyphens. + // Must be the identifier for an existing DB cluster. + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. DBClusterIdentifier *string `type:"string" required:"true"` // The name of the DB cluster parameter group to use for the DB cluster. 
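The TargetDBInstanceIdentifier field added to FailoverDBClusterInput above lets the caller choose which Aurora Replica becomes the new primary. A minimal sketch, reusing the mydbcluster-replica1 example from the documentation and a placeholder cluster name:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// failoverToReplica fails a cluster over to a specific Aurora Replica.
func failoverToReplica(svc *rds.RDS) (*rds.DBCluster, error) {
	out, err := svc.FailoverDBCluster(&rds.FailoverDBClusterInput{
		DBClusterIdentifier:        aws.String("mydbcluster"),          // placeholder
		TargetDBInstanceIdentifier: aws.String("mydbcluster-replica1"), // replica named in the docs above
	})
	if err != nil {
		return nil, err
	}
	return out.DBCluster, nil
}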
@@ -9182,9 +12178,13 @@ type ModifyDBClusterInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens - // Example: my-cluster2 + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-cluster2 NewDBClusterIdentifier *string `type:"string"` // A value that indicates that the DB cluster should be associated with the @@ -9216,9 +12216,13 @@ type ModifyDBClusterInput struct { // // Constraints: // - // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated - // Time (UTC). Must not conflict with the preferred maintenance window. Must - // be at least 30 minutes. + // Must be in the format hh24:mi-hh24:mi. + // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. PreferredBackupWindow *string `type:"string"` // The weekly time range during which system maintenance can occur, in Universal @@ -9268,9 +12272,20 @@ type ModifyDBClusterOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -9320,6 +12335,88 @@ func (s *ModifyDBClusterParameterGroupInput) Validate() error { return nil } +type ModifyDBClusterSnapshotAttributeInput struct { + _ struct{} `type:"structure"` + + // The name of the DB cluster snapshot attribute to modify. + // + // To manage authorization for other AWS accounts to copy or restore a manual + // DB cluster snapshot, set this value to restore. + AttributeName *string `type:"string" required:"true"` + + // The identifier for the DB cluster snapshot to modify the attributes for. + DBClusterSnapshotIdentifier *string `type:"string" required:"true"` + + // A list of DB cluster snapshot attributes to add to the attribute specified + // by AttributeName. + // + // To authorize other AWS accounts to copy or restore a manual DB cluster snapshot, + // set this list to include one or more AWS account IDs, or all to make the + // manual DB cluster snapshot restorable by any AWS account. Do not add the + // all value for any manual DB cluster snapshots that contain private information + // that you don't want available to all AWS accounts. + ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` + + // A list of DB cluster snapshot attributes to remove from the attribute specified + // by AttributeName. + // + // To remove authorization for other AWS accounts to copy or restore a manual + // DB cluster snapshot, set this list to include one or more AWS account identifiers, + // or all to remove authorization for any AWS account to copy or restore the + // DB cluster snapshot. 
If you specify all, an AWS account whose account ID + // is explicitly added to the restore attribute can still copy or restore a + // manual DB cluster snapshot. + ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` +} + +// String returns the string representation +func (s ModifyDBClusterSnapshotAttributeInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterSnapshotAttributeInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ModifyDBClusterSnapshotAttributeInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ModifyDBClusterSnapshotAttributeInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } + if s.DBClusterSnapshotIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterSnapshotIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ModifyDBClusterSnapshotAttributeOutput struct { + _ struct{} `type:"structure"` + + // Contains the results of a successful call to the DescribeDBClusterSnapshotAttributes + // API action. + // + // Manual DB cluster snapshot attributes are used to authorize other AWS accounts + // to copy or restore a manual DB cluster snapshot. For more information, see + // the ModifyDBClusterSnapshotAttribute API action. + DBClusterSnapshotAttributesResult *DBClusterSnapshotAttributesResult `type:"structure"` +} + +// String returns the string representation +func (s ModifyDBClusterSnapshotAttributeOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ModifyDBClusterSnapshotAttributeOutput) GoString() string { + return s.String() +} + type ModifyDBInstanceInput struct { _ struct{} `type:"structure"` @@ -9377,7 +12474,7 @@ type ModifyDBInstanceInput struct { // // Cannot be modified. // - // If you choose to migrate your DB instance from using standard storage to + // If you choose to migrate your DB instance from using standard storage to // using Provisioned IOPS, or from using Provisioned IOPS to using standard // storage, the process can take time. The duration of the migration depends // on several factors such as database load, storage size, storage type (standard @@ -9439,10 +12536,15 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must be a value from 0 to 35 Can be specified for a MySQL Read Replica - // only if the source is running MySQL 5.6 Can be specified for a PostgreSQL - // Read Replica only if the source is running PostgreSQL 9.3.5 Cannot be set - // to 0 if the DB instance is a source to Read Replicas + // Must be a value from 0 to 35 + // + // Can be specified for a MySQL Read Replica only if the source is running + // MySQL 5.6 + // + // Can be specified for a PostgreSQL Read Replica only if the source is running + // PostgreSQL 9.3.5 + // + // Cannot be set to 0 if the DB instance is a source to Read Replicas BackupRetentionPeriod *int64 `type:"integer"` // Indicates the certificate that needs to be associated with the instance. 
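ModifyDBClusterSnapshotAttributeInput, defined above, is how a manual DB cluster snapshot gets shared: set AttributeName to restore and add account IDs (or all) to ValuesToAdd. A sketch with a placeholder snapshot identifier and a made-up account ID:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// shareClusterSnapshot authorizes another AWS account to copy or restore a manual snapshot.
func shareClusterSnapshot(svc *rds.RDS) error {
	_, err := svc.ModifyDBClusterSnapshotAttribute(&rds.ModifyDBClusterSnapshotAttributeInput{
		DBClusterSnapshotIdentifier: aws.String("my-cluster-snapshot"),         // placeholder
		AttributeName:               aws.String("restore"),                     // attribute named in the docs above
		ValuesToAdd:                 aws.StringSlice([]string{"123456789012"}), // hypothetical account ID
	})
	return err
}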
@@ -9474,9 +12576,13 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must be the identifier for an existing DB instance Must contain from 1 - // to 63 alphanumeric characters or hyphens First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be the identifier for an existing DB instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // The name of the DB parameter group to apply to the DB instance. Changing @@ -9546,8 +12652,11 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` // Specify the Active Directory Domain to move the instance to. @@ -9565,7 +12674,7 @@ type ModifyDBInstanceInput struct { // results in an outage and the change is applied during the next maintenance // window unless the ApplyImmediately parameter is set to true for this request. // - // For major version upgrades, if a non-default DB parameter group is currently + // For major version upgrades, if a non-default DB parameter group is currently // in use, a new DB parameter group in the DB parameter group family for the // new engine version must be specified. The new DB parameter group can be the // default for that DB parameter group family. @@ -9593,7 +12702,7 @@ type ModifyDBInstanceInput struct { // // Type: Integer // - // If you choose to migrate your DB instance from using standard storage to + // If you choose to migrate your DB instance from using standard storage to // using Provisioned IOPS, or from using Provisioned IOPS to using standard // storage, the process can take time. The duration of the migration depends // on several factors such as database load, storage size, storage type (standard @@ -9629,7 +12738,7 @@ type ModifyDBInstanceInput struct { // The interval, in seconds, between points when Enhanced Monitoring metrics // are collected for the DB instance. To disable collecting Enhanced Monitoring - // metrics, specify 0. The default is 60. + // metrics, specify 0. The default is 0. // // If MonitoringRoleArn is specified, then you must also set MonitoringInterval // to a value other than 0. @@ -9650,10 +12759,7 @@ type ModifyDBInstanceInput struct { // does not result in an outage and the change is applied during the next maintenance // window unless the ApplyImmediately parameter is set to true for this request. // - // Constraints: Cannot be specified if the DB instance is a Read Replica. This - // parameter cannot be used with SQL Server DB instances. Multi-AZ for SQL Server - // DB instances is set using the Mirroring option in an option group associated - // with the DB instance. + // Constraints: Cannot be specified if the DB instance is a Read Replica. MultiAZ *bool `type:"boolean"` // The new DB instance identifier for the DB instance when renaming a DB instance. 
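The MonitoringInterval note above (a non-zero interval is required whenever MonitoringRoleArn is supplied) translates into a ModifyDBInstance call along these lines; the instance name and role ARN are placeholders, not values from this patch:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// enableEnhancedMonitoring turns on 60-second Enhanced Monitoring for an instance.
func enableEnhancedMonitoring(svc *rds.RDS) error {
	_, err := svc.ModifyDBInstance(&rds.ModifyDBInstanceInput{
		DBInstanceIdentifier: aws.String("mydbinstance"),                                  // placeholder
		MonitoringInterval:   aws.Int64(60),                                               // must be non-zero when a role is set
		MonitoringRoleArn:    aws.String("arn:aws:iam::123456789012:role/rds-monitoring"), // placeholder ARN
		ApplyImmediately:     aws.Bool(true),
	})
	return err
}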
@@ -9664,8 +12770,11 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens NewDBInstanceIdentifier *string `type:"string"` // Indicates that the DB instance should be associated with the specified option @@ -9676,9 +12785,9 @@ type ModifyDBInstanceInput struct { // can cause a brief (sub-second) period during which new connections are rejected // but existing connections are not interrupted. // - // Permanent options, such as the TDE option for Oracle Advanced Security - // TDE, cannot be removed from an option group, and that option group cannot - // be removed from a DB instance once it is associated with a DB instance + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance OptionGroupName *string `type:"string"` // The daily time range during which automated backups are created if automated @@ -9688,9 +12797,13 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must be in the format hh24:mi-hh24:mi Times should be in Universal Time - // Coordinated (UTC) Must not conflict with the preferred maintenance window - // Must be at least 30 minutes + // Must be in the format hh24:mi-hh24:mi + // + // Times should be in Universal Time Coordinated (UTC) + // + // Must not conflict with the preferred maintenance window + // + // Must be at least 30 minutes PreferredBackupWindow *string `type:"string"` // The weekly time range (in UTC) during which system maintenance can occur, @@ -9726,14 +12839,14 @@ type ModifyDBInstanceInput struct { // to make the DB instance internal with a DNS name that resolves to a private // IP address. // - // PubliclyAccessible only applies to DB instances in a VPC. The DB instance + // PubliclyAccessible only applies to DB instances in a VPC. The DB instance // must be part of a public subnet and PubliclyAccessible must be true in order // for it to be publicly accessible. // // Changes to the PubliclyAccessible parameter are applied immediately regardless // of the value of the ApplyImmediately parameter. // - // Default: false + // Default: false PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -9757,8 +12870,11 @@ type ModifyDBInstanceInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` } @@ -9790,8 +12906,14 @@ type ModifyDBInstanceOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. 
+ // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -9812,9 +12934,13 @@ type ModifyDBParameterGroupInput struct { // // Constraints: // - // Must be the name of an existing DB parameter group Must be 1 to 255 alphanumeric - // characters First character must be a letter Cannot end with a hyphen or contain - // two consecutive hyphens + // Must be the name of an existing DB parameter group + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupName *string `type:"string" required:"true"` // An array of parameter names, values, and the apply method for the parameter @@ -9824,7 +12950,7 @@ type ModifyDBParameterGroupInput struct { // // Valid Values (for the application method): immediate | pending-reboot // - // You can use the immediate value with dynamic parameters only. You can use + // You can use the immediate value with dynamic parameters only. You can use // the pending-reboot value for both dynamic and static parameters, and changes // are applied when you reboot the DB instance without failover. Parameters []*Parameter `locationNameList:"Parameter" type:"list" required:"true"` @@ -9862,29 +12988,29 @@ type ModifyDBSnapshotAttributeInput struct { // The name of the DB snapshot attribute to modify. // // To manage authorization for other AWS accounts to copy or restore a manual - // DB snapshot, this value is restore. - AttributeName *string `type:"string"` + // DB snapshot, set this value to restore. + AttributeName *string `type:"string" required:"true"` // The identifier for the DB snapshot to modify the attributes for. DBSnapshotIdentifier *string `type:"string" required:"true"` // A list of DB snapshot attributes to add to the attribute specified by AttributeName. // - // To authorize other AWS Accounts to copy or restore a manual snapshot, this - // is one or more AWS account identifiers, or all to make the manual DB snapshot - // restorable by any AWS account. Do not add the all value for any manual DB - // snapshots that contain private information that you do not want to be available - // to all AWS accounts. + // To authorize other AWS accounts to copy or restore a manual snapshot, set + // this list to include one or more AWS account IDs, or all to make the manual + // DB snapshot restorable by any AWS account. Do not add the all value for any + // manual DB snapshots that contain private information that you don't want + // available to all AWS accounts. ValuesToAdd []*string `locationNameList:"AttributeValue" type:"list"` // A list of DB snapshot attributes to remove from the attribute specified by // AttributeName. // - // To remove authorization for other AWS Accounts to copy or restore a manual - // snapshot, this is one or more AWS account identifiers, or all to remove authorization - // for any AWS account to copy or restore the DB snapshot. If you specify all, - // AWS accounts that have their account identifier explicitly added to the restore - // attribute can still copy or restore the manual DB snapshot. + // To remove authorization for other AWS accounts to copy or restore a manual + // snapshot, set this list to include one or more AWS account identifiers, or + // all to remove authorization for any AWS account to copy or restore the DB + // snapshot. 
If you specify all, an AWS account whose account ID is explicitly + // added to the restore attribute can still copy or restore the manual DB snapshot. ValuesToRemove []*string `locationNameList:"AttributeValue" type:"list"` } @@ -9901,6 +13027,9 @@ func (s ModifyDBSnapshotAttributeInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *ModifyDBSnapshotAttributeInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ModifyDBSnapshotAttributeInput"} + if s.AttributeName == nil { + invalidParams.Add(request.NewErrParamRequired("AttributeName")) + } if s.DBSnapshotIdentifier == nil { invalidParams.Add(request.NewErrParamRequired("DBSnapshotIdentifier")) } @@ -9915,11 +13044,11 @@ type ModifyDBSnapshotAttributeOutput struct { _ struct{} `type:"structure"` // Contains the results of a successful call to the DescribeDBSnapshotAttributes - // API. + // API action. // // Manual DB snapshot attributes are used to authorize other AWS accounts to // copy or restore a manual DB snapshot. For more information, see the ModifyDBSnapshotAttribute - // API. + // API action. DBSnapshotAttributesResult *DBSnapshotAttributesResult `type:"structure"` } @@ -9982,7 +13111,14 @@ type ModifyDBSubnetGroupOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBSubnetGroup ModifyDBSubnetGroup DescribeDBSubnetGroups DeleteDBSubnetGroup + // CreateDBSubnetGroup + // + // ModifyDBSubnetGroup + // + // DescribeDBSubnetGroups + // + // DeleteDBSubnetGroup + // // This data type is used as a response element in the DescribeDBSubnetGroups // action. DBSubnetGroup *DBSubnetGroup `type:"structure"` @@ -10078,9 +13214,9 @@ type ModifyOptionGroupInput struct { // The name of the option group to be modified. // - // Permanent options, such as the TDE option for Oracle Advanced Security - // TDE, cannot be removed from an option group, and that option group cannot - // be removed from a DB instance once it is associated with a DB instance + // Permanent options, such as the TDE option for Oracle Advanced Security TDE, + // cannot be removed from an option group, and that option group cannot be removed + // from a DB instance once it is associated with a DB instance OptionGroupName *string `type:"string" required:"true"` // Options in this list are added to the option group or, if already present, @@ -10626,6 +13762,79 @@ func (s PendingModifiedValues) GoString() string { return s.String() } +type PromoteReadReplicaDBClusterInput struct { + _ struct{} `type:"structure"` + + // The identifier of the DB cluster Read Replica to promote. This parameter + // is not case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. + // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster-replica1 + DBClusterIdentifier *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PromoteReadReplicaDBClusterInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaDBClusterInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PromoteReadReplicaDBClusterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PromoteReadReplicaDBClusterInput"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PromoteReadReplicaDBClusterOutput struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s PromoteReadReplicaDBClusterOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PromoteReadReplicaDBClusterOutput) GoString() string { + return s.String() +} + type PromoteReadReplicaInput struct { _ struct{} `type:"structure"` @@ -10633,21 +13842,26 @@ type PromoteReadReplicaInput struct { // a positive number enables backups. Setting this parameter to 0 disables automated // backups. // - // Default: 1 + // Default: 1 // // Constraints: // - // Must be a value from 0 to 8 + // Must be a value from 0 to 8 BackupRetentionPeriod *int64 `type:"integer"` // The DB instance identifier. This value is stored as a lowercase string. // // Constraints: // - // Must be the identifier for an existing Read Replica DB instance Must contain - // from 1 to 63 alphanumeric characters or hyphens First character must be a - // letter Cannot end with a hyphen or contain two consecutive hyphens Example: - // mydbinstance + // Must be the identifier for an existing Read Replica DB instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: mydbinstance DBInstanceIdentifier *string `type:"string" required:"true"` // The daily time range during which automated backups are created if automated @@ -10660,9 +13874,13 @@ type PromoteReadReplicaInput struct { // // Constraints: // - // Must be in the format hh24:mi-hh24:mi. Times should be in Universal Coordinated - // Time (UTC). Must not conflict with the preferred maintenance window. Must - // be at least 30 minutes. + // Must be in the format hh24:mi-hh24:mi. + // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. PreferredBackupWindow *string `type:"string"` } @@ -10694,8 +13912,14 @@ type PromoteReadReplicaOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. 
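PromoteReadReplicaDBClusterInput, introduced above, takes only the identifier of the cluster Read Replica to promote. A sketch using the my-cluster-replica1 example from its documentation:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// promoteClusterReplica promotes a DB cluster Read Replica to a standalone cluster.
func promoteClusterReplica(svc *rds.RDS) (*rds.DBCluster, error) {
	out, err := svc.PromoteReadReplicaDBCluster(&rds.PromoteReadReplicaDBClusterInput{
		DBClusterIdentifier: aws.String("my-cluster-replica1"), // example identifier from the docs above
	})
	if err != nil {
		return nil, err
	}
	return out.DBCluster, nil
}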
DBInstance *DBInstance `type:"structure"` } @@ -10779,8 +14003,11 @@ type RebootDBInstanceInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBInstanceIdentifier *string `type:"string" required:"true"` // When true, the reboot will be conducted through a MultiAZ failover. @@ -10818,8 +14045,14 @@ type RebootDBInstanceOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -11114,8 +14347,11 @@ type ResetDBParameterGroupInput struct { // // Constraints: // - // Must be 1 to 255 alphanumeric characters First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBParameterGroupName *string `type:"string" required:"true"` // An array of parameter names, values, and the apply method for the parameter @@ -11208,9 +14444,13 @@ type RestoreDBClusterFromSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 255 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens - // Example: my-snapshot-id + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id DBClusterIdentifier *string `type:"string" required:"true"` // The name of the DB subnet group to use for the new DB cluster. @@ -11235,7 +14475,7 @@ type RestoreDBClusterFromSnapshotInput struct { EngineVersion *string `type:"string"` // The KMS key identifier to use when restoring an encrypted DB cluster from - // an encrypted DB cluster snapshot. + // a DB cluster snapshot. // // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption // key. If you are restoring a DB cluster with the same AWS account that owns @@ -11245,12 +14485,11 @@ type RestoreDBClusterFromSnapshotInput struct { // If you do not specify a value for the KmsKeyId parameter, then the following // will occur: // - // If the DB cluster snapshot is encrypted, then the restored DB cluster is - // encrypted using the KMS key that was used to encrypt the DB cluster snapshot. - // If the DB cluster snapshot is not encrypted, then the restored DB cluster - // is not encrypted. If SnapshotIdentifier refers to a DB cluster snapshot - // that is not encrypted, and you specify a value for the KmsKeyId parameter, - // then the restore request is rejected. + // If the DB cluster snapshot is encrypted, then the restored DB cluster + // is encrypted using the KMS key that was used to encrypt the DB cluster snapshot. 
+ // + // If the DB cluster snapshot is not encrypted, then the restored DB cluster + // is encrypted using the specified encryption key. KmsKeyId *string `type:"string"` // The name of the option group to use for the restored DB cluster. @@ -11267,8 +14506,11 @@ type RestoreDBClusterFromSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens SnapshotIdentifier *string `type:"string" required:"true"` // The tags to be assigned to the restored DB cluster. @@ -11312,9 +14554,20 @@ type RestoreDBClusterFromSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -11335,8 +14588,11 @@ type RestoreDBClusterToPointInTimeInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens DBClusterIdentifier *string `type:"string" required:"true"` // The DB subnet group name to use for the new DB cluster. @@ -11363,11 +14619,14 @@ type RestoreDBClusterToPointInTimeInput struct { // If you do not specify a value for the KmsKeyId parameter, then the following // will occur: // - // If the DB cluster is encrypted, then the restored DB cluster is encrypted - // using the KMS key that was used to encrypt the source DB cluster. If the - // DB cluster is not encrypted, then the restored DB cluster is not encrypted. - // If DBClusterIdentifier refers to a DB cluster that is note encrypted, then - // the restore request is rejected. + // If the DB cluster is encrypted, then the restored DB cluster is encrypted + // using the KMS key that was used to encrypt the source DB cluster. + // + // If the DB cluster is not encrypted, then the restored DB cluster is not + // encrypted. + // + // If DBClusterIdentifier refers to a DB cluster that is note encrypted, + // then the restore request is rejected. KmsKeyId *string `type:"string"` // The name of the option group for the new DB cluster. 
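Restoring a cluster from a snapshot, as documented above, needs the new cluster identifier and the snapshot identifier; KmsKeyId is only needed to override the key handling described in the comments. A sketch with placeholder names, assuming an Aurora cluster and assuming the engine name is also required by this input type:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/rds"
)

// restoreClusterFromSnapshot creates a new DB cluster from an existing cluster snapshot.
func restoreClusterFromSnapshot(svc *rds.RDS) (*rds.DBCluster, error) {
	out, err := svc.RestoreDBClusterFromSnapshot(&rds.RestoreDBClusterFromSnapshotInput{
		DBClusterIdentifier: aws.String("my-restored-cluster"), // placeholder
		SnapshotIdentifier:  aws.String("my-cluster-snapshot"), // placeholder
		Engine:              aws.String("aurora"),              // assumed engine value
	})
	if err != nil {
		return nil, err
	}
	return out.DBCluster, nil
}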
@@ -11386,17 +14645,24 @@ type RestoreDBClusterToPointInTimeInput struct { // // Constraints: // - // Must be before the latest restorable time for the DB instance Cannot be - // specified if UseLatestRestorableTime parameter is true Example: 2015-03-07T23:45:00Z + // Must be before the latest restorable time for the DB instance + // + // Cannot be specified if UseLatestRestorableTime parameter is true + // + // Example: 2015-03-07T23:45:00Z RestoreToTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` // The identifier of the source DB cluster from which to restore. // // Constraints: // - // Must be the identifier of an existing database instance Must contain from - // 1 to 63 alphanumeric characters or hyphens First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be the identifier of an existing database instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens SourceDBClusterIdentifier *string `type:"string" required:"true"` // A list of tags. @@ -11445,9 +14711,20 @@ type RestoreDBClusterToPointInTimeOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBCluster DeleteDBCluster FailoverDBCluster ModifyDBCluster - // RestoreDBClusterFromSnapshot This data type is used as a response element - // in the DescribeDBClusters action. + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. DBCluster *DBCluster `type:"structure"` } @@ -11496,9 +14773,14 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 for - // SQL Server) First character must be a letter Cannot end with a hyphen or - // contain two consecutive hyphens Example: my-snapshot-id + // Must contain from 1 to 63 alphanumeric characters or hyphens (1 to 15 + // for SQL Server) + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // Example: my-snapshot-id DBInstanceIdentifier *string `type:"string" required:"true"` // The database name for the restored DB instance. @@ -11510,9 +14792,13 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // // Constraints: // - // Must contain from 1 to 255 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens - // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier + // Must contain from 1 to 255 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + // + // If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier // must be the ARN of the shared DB snapshot. DBSnapshotIdentifier *string `type:"string" required:"true"` @@ -11548,7 +14834,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // though your DB instance will be available for connections before the conversion // starts. // - // Constraints: Must be an integer greater than 1000. + // Constraints: Must be an integer greater than 1000. 
// // SQL Server // @@ -11557,7 +14843,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // License model information for the restored DB instance. // - // Default: Same as source. + // Default: Same as source. // // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` @@ -11587,14 +14873,18 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // which resolves to a public IP address. A value of false specifies an internal // instance with a DNS name that resolves to a private IP address. // - // Default: The default behavior varies depending on whether a VPC has been + // Default: The default behavior varies depending on whether a VPC has been // requested or not. The following list shows the default behavior in each case. // - // Default VPC: true VPC: false If no DB subnet group has been specified - // as part of the request and the PubliclyAccessible value has not been set, - // the DB instance will be publicly accessible. If a specific DB subnet group - // has been specified as part of the request and the PubliclyAccessible value - // has not been set, the DB instance will be private. + // Default VPC: true + // + // VPC: false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. PubliclyAccessible *bool `type:"boolean"` // Specifies the storage type to be associated with the DB instance. @@ -11648,8 +14938,14 @@ type RestoreDBInstanceFromDBSnapshotOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -11728,7 +15024,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // The amount of Provisioned IOPS (input/output operations per second) to be // initially allocated for the DB instance. // - // Constraints: Must be an integer greater than 1000. + // Constraints: Must be an integer greater than 1000. // // SQL Server // @@ -11737,7 +15033,7 @@ type RestoreDBInstanceToPointInTimeInput struct { // License model information for the restored DB instance. // - // Default: Same as source. + // Default: Same as source. // // Valid values: license-included | bring-your-own-license | general-public-license LicenseModel *string `type:"string"` @@ -11767,14 +15063,18 @@ type RestoreDBInstanceToPointInTimeInput struct { // which resolves to a public IP address. A value of false specifies an internal // instance with a DNS name that resolves to a private IP address. // - // Default: The default behavior varies depending on whether a VPC has been + // Default: The default behavior varies depending on whether a VPC has been // requested or not. The following list shows the default behavior in each case. // - // Default VPC:true VPC:false If no DB subnet group has been specified - // as part of the request and the PubliclyAccessible value has not been set, - // the DB instance will be publicly accessible. 
If a specific DB subnet group - // has been specified as part of the request and the PubliclyAccessible value - // has not been set, the DB instance will be private. + // Default VPC:true + // + // VPC:false + // + // If no DB subnet group has been specified as part of the request and the + // PubliclyAccessible value has not been set, the DB instance will be publicly + // accessible. If a specific DB subnet group has been specified as part of the + // request and the PubliclyAccessible value has not been set, the DB instance + // will be private. PubliclyAccessible *bool `type:"boolean"` // The date and time to restore from. @@ -11783,17 +15083,24 @@ type RestoreDBInstanceToPointInTimeInput struct { // // Constraints: // - // Must be before the latest restorable time for the DB instance Cannot be - // specified if UseLatestRestorableTime parameter is true Example: 2009-09-07T23:45:00Z + // Must be before the latest restorable time for the DB instance + // + // Cannot be specified if UseLatestRestorableTime parameter is true + // + // Example: 2009-09-07T23:45:00Z RestoreTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` // The identifier of the source DB instance from which to restore. // // Constraints: // - // Must be the identifier of an existing database instance Must contain from - // 1 to 63 alphanumeric characters or hyphens First character must be a letter - // Cannot end with a hyphen or contain two consecutive hyphens + // Must be the identifier of an existing database instance + // + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens SourceDBInstanceIdentifier *string `type:"string" required:"true"` // Specifies the storage type to be associated with the DB instance. @@ -11812,8 +15119,11 @@ type RestoreDBInstanceToPointInTimeInput struct { // // Constraints: // - // Must contain from 1 to 63 alphanumeric characters or hyphens First character - // must be a letter Cannot end with a hyphen or contain two consecutive hyphens + // Must contain from 1 to 63 alphanumeric characters or hyphens + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens TargetDBInstanceIdentifier *string `type:"string" required:"true"` // The ARN from the Key Store with which to associate the instance for TDE encryption. @@ -11863,8 +15173,14 @@ type RestoreDBInstanceToPointInTimeOutput struct { // Contains the result of a successful invocation of the following actions: // - // CreateDBInstance DeleteDBInstance ModifyDBInstance This data type - // is used as a response element in the DescribeDBInstances action. + // CreateDBInstance + // + // DeleteDBInstance + // + // ModifyDBInstance + // + // This data type is used as a response element in the DescribeDBInstances + // action. DBInstance *DBInstance `type:"structure"` } @@ -11935,9 +15251,16 @@ type RevokeDBSecurityGroupIngressOutput struct { // Contains the result of a successful invocation of the following actions: // - // DescribeDBSecurityGroups AuthorizeDBSecurityGroupIngress CreateDBSecurityGroup - // RevokeDBSecurityGroupIngress This data type is used as a response element - // in the DescribeDBSecurityGroups action. + // DescribeDBSecurityGroups + // + // AuthorizeDBSecurityGroupIngress + // + // CreateDBSecurityGroup + // + // RevokeDBSecurityGroupIngress + // + // This data type is used as a response element in the DescribeDBSecurityGroups + // action. 
DBSecurityGroup *DBSecurityGroup `type:"structure"` } @@ -11958,7 +15281,9 @@ type Subnet struct { // Contains Availability Zone information. // - // This data type is used as an element in the following data type: OrderableDBInstanceOption + // This data type is used as an element in the following data type: + // + // OrderableDBInstanceOption SubnetAvailabilityZone *AvailabilityZone `type:"structure"` // Specifies the identifier of the subnet. diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go index e07795efa..1082f992a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Relational Database Service (Amazon RDS) is a web service that makes @@ -17,7 +17,7 @@ import ( // relational database and manages common database administration tasks, freeing // up developers to focus on what makes their applications and businesses unique. // -// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, +// Amazon RDS gives you access to the capabilities of a MySQL, MariaDB, PostgreSQL, // Microsoft SQL Server, Oracle, or Amazon Aurora database server. These capabilities // mean that the code, applications, and tools you already use today with your // existing databases work with Amazon RDS without modification. Amazon RDS @@ -27,7 +27,7 @@ import ( // demand. As with all Amazon Web Services, there are no up-front investments, // and you pay only for the resources you use. // -// This interface reference for Amazon RDS contains documentation for a programming +// This interface reference for Amazon RDS contains documentation for a programming // or command line interface you can use to manage Amazon RDS. Note that Amazon // RDS is asynchronous, which means that some interfaces might require techniques // such as polling or callback functions to determine when a command has been @@ -36,22 +36,22 @@ import ( // maintenance window. The reference structure is as follows, and we list following // some related topics from the user guide. // -// Amazon RDS API Reference +// Amazon RDS API Reference // -// For the alphabetical list of API actions, see API Actions (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Operations.html). +// For the alphabetical list of API actions, see API Actions (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Operations.html). // -// For the alphabetical list of data types, see Data Types (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Types.html). +// For the alphabetical list of data types, see Data Types (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_Types.html). // -// For a list of common query parameters, see Common Parameters (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonParameters.html). +// For a list of common query parameters, see Common Parameters (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonParameters.html). // -// For descriptions of the error codes, see Common Errors (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonErrors.html). 
+// For descriptions of the error codes, see Common Errors (http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/CommonErrors.html). // -// Amazon RDS User Guide +// Amazon RDS User Guide // -// For a summary of the Amazon RDS interfaces, see Available RDS Interfaces +// For a summary of the Amazon RDS interfaces, see Available RDS Interfaces // (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Welcome.html#Welcome.Interfaces). // -// For more information about how to use the Query API, see Using the Query +// For more information about how to use the Query API, see Using the Query // API (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Using_the_Query_API.html). //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. @@ -99,7 +99,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go index 97ff5ecfe..d9d17aefa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/api.go @@ -14,7 +14,28 @@ import ( const opAuthorizeClusterSecurityGroupIngress = "AuthorizeClusterSecurityGroupIngress" -// AuthorizeClusterSecurityGroupIngressRequest generates a request for the AuthorizeClusterSecurityGroupIngress operation. +// AuthorizeClusterSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeClusterSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeClusterSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeClusterSecurityGroupIngressRequest method. +// req, resp := client.AuthorizeClusterSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) AuthorizeClusterSecurityGroupIngressRequest(input *AuthorizeClusterSecurityGroupIngressInput) (req *request.Request, output *AuthorizeClusterSecurityGroupIngressOutput) { op := &request.Operation{ Name: opAuthorizeClusterSecurityGroupIngress, @@ -60,7 +81,28 @@ func (c *Redshift) AuthorizeClusterSecurityGroupIngress(input *AuthorizeClusterS const opAuthorizeSnapshotAccess = "AuthorizeSnapshotAccess" -// AuthorizeSnapshotAccessRequest generates a request for the AuthorizeSnapshotAccess operation. +// AuthorizeSnapshotAccessRequest generates a "aws/request.Request" representing the +// client's request for the AuthorizeSnapshotAccess operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AuthorizeSnapshotAccess method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AuthorizeSnapshotAccessRequest method. +// req, resp := client.AuthorizeSnapshotAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) AuthorizeSnapshotAccessRequest(input *AuthorizeSnapshotAccessInput) (req *request.Request, output *AuthorizeSnapshotAccessOutput) { op := &request.Operation{ Name: opAuthorizeSnapshotAccess, @@ -91,7 +133,28 @@ func (c *Redshift) AuthorizeSnapshotAccess(input *AuthorizeSnapshotAccessInput) const opCopyClusterSnapshot = "CopyClusterSnapshot" -// CopyClusterSnapshotRequest generates a request for the CopyClusterSnapshot operation. +// CopyClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CopyClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyClusterSnapshotRequest method. +// req, resp := client.CopyClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CopyClusterSnapshotRequest(input *CopyClusterSnapshotInput) (req *request.Request, output *CopyClusterSnapshotOutput) { op := &request.Operation{ Name: opCopyClusterSnapshot, @@ -130,7 +193,28 @@ func (c *Redshift) CopyClusterSnapshot(input *CopyClusterSnapshotInput) (*CopyCl const opCreateCluster = "CreateCluster" -// CreateClusterRequest generates a request for the CreateCluster operation. +// CreateClusterRequest generates a "aws/request.Request" representing the +// client's request for the CreateCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterRequest method. 
+// req, resp := client.CreateClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateClusterRequest(input *CreateClusterInput) (req *request.Request, output *CreateClusterOutput) { op := &request.Operation{ Name: opCreateCluster, @@ -163,7 +247,28 @@ func (c *Redshift) CreateCluster(input *CreateClusterInput) (*CreateClusterOutpu const opCreateClusterParameterGroup = "CreateClusterParameterGroup" -// CreateClusterParameterGroupRequest generates a request for the CreateClusterParameterGroup operation. +// CreateClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterParameterGroupRequest method. +// req, resp := client.CreateClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateClusterParameterGroupRequest(input *CreateClusterParameterGroupInput) (req *request.Request, output *CreateClusterParameterGroupOutput) { op := &request.Operation{ Name: opCreateClusterParameterGroup, @@ -200,7 +305,28 @@ func (c *Redshift) CreateClusterParameterGroup(input *CreateClusterParameterGrou const opCreateClusterSecurityGroup = "CreateClusterSecurityGroup" -// CreateClusterSecurityGroupRequest generates a request for the CreateClusterSecurityGroup operation. +// CreateClusterSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSecurityGroupRequest method. 
+// req, resp := client.CreateClusterSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateClusterSecurityGroupRequest(input *CreateClusterSecurityGroupInput) (req *request.Request, output *CreateClusterSecurityGroupOutput) { op := &request.Operation{ Name: opCreateClusterSecurityGroup, @@ -232,7 +358,28 @@ func (c *Redshift) CreateClusterSecurityGroup(input *CreateClusterSecurityGroupI const opCreateClusterSnapshot = "CreateClusterSnapshot" -// CreateClusterSnapshotRequest generates a request for the CreateClusterSnapshot operation. +// CreateClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSnapshotRequest method. +// req, resp := client.CreateClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateClusterSnapshotRequest(input *CreateClusterSnapshotInput) (req *request.Request, output *CreateClusterSnapshotOutput) { op := &request.Operation{ Name: opCreateClusterSnapshot, @@ -264,7 +411,28 @@ func (c *Redshift) CreateClusterSnapshot(input *CreateClusterSnapshotInput) (*Cr const opCreateClusterSubnetGroup = "CreateClusterSubnetGroup" -// CreateClusterSubnetGroupRequest generates a request for the CreateClusterSubnetGroup operation. +// CreateClusterSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the CreateClusterSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateClusterSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateClusterSubnetGroupRequest method. 
+// req, resp := client.CreateClusterSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateClusterSubnetGroupRequest(input *CreateClusterSubnetGroupInput) (req *request.Request, output *CreateClusterSubnetGroupOutput) { op := &request.Operation{ Name: opCreateClusterSubnetGroup, @@ -297,7 +465,28 @@ func (c *Redshift) CreateClusterSubnetGroup(input *CreateClusterSubnetGroupInput const opCreateEventSubscription = "CreateEventSubscription" -// CreateEventSubscriptionRequest generates a request for the CreateEventSubscription operation. +// CreateEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the CreateEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateEventSubscriptionRequest method. +// req, resp := client.CreateEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateEventSubscriptionRequest(input *CreateEventSubscriptionInput) (req *request.Request, output *CreateEventSubscriptionOutput) { op := &request.Operation{ Name: opCreateEventSubscription, @@ -345,7 +534,28 @@ func (c *Redshift) CreateEventSubscription(input *CreateEventSubscriptionInput) const opCreateHsmClientCertificate = "CreateHsmClientCertificate" -// CreateHsmClientCertificateRequest generates a request for the CreateHsmClientCertificate operation. +// CreateHsmClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the CreateHsmClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHsmClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHsmClientCertificateRequest method. 
+// req, resp := client.CreateHsmClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateHsmClientCertificateRequest(input *CreateHsmClientCertificateInput) (req *request.Request, output *CreateHsmClientCertificateOutput) { op := &request.Operation{ Name: opCreateHsmClientCertificate, @@ -380,7 +590,28 @@ func (c *Redshift) CreateHsmClientCertificate(input *CreateHsmClientCertificateI const opCreateHsmConfiguration = "CreateHsmConfiguration" -// CreateHsmConfigurationRequest generates a request for the CreateHsmConfiguration operation. +// CreateHsmConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the CreateHsmConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHsmConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHsmConfigurationRequest method. +// req, resp := client.CreateHsmConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateHsmConfigurationRequest(input *CreateHsmConfigurationInput) (req *request.Request, output *CreateHsmConfigurationOutput) { op := &request.Operation{ Name: opCreateHsmConfiguration, @@ -416,7 +647,28 @@ func (c *Redshift) CreateHsmConfiguration(input *CreateHsmConfigurationInput) (* const opCreateSnapshotCopyGrant = "CreateSnapshotCopyGrant" -// CreateSnapshotCopyGrantRequest generates a request for the CreateSnapshotCopyGrant operation. +// CreateSnapshotCopyGrantRequest generates a "aws/request.Request" representing the +// client's request for the CreateSnapshotCopyGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateSnapshotCopyGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateSnapshotCopyGrantRequest method. +// req, resp := client.CreateSnapshotCopyGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateSnapshotCopyGrantRequest(input *CreateSnapshotCopyGrantInput) (req *request.Request, output *CreateSnapshotCopyGrantOutput) { op := &request.Operation{ Name: opCreateSnapshotCopyGrant, @@ -449,7 +701,28 @@ func (c *Redshift) CreateSnapshotCopyGrant(input *CreateSnapshotCopyGrantInput) const opCreateTags = "CreateTags" -// CreateTagsRequest generates a request for the CreateTags operation. 
+// CreateTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTagsRequest method. +// req, resp := client.CreateTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) CreateTagsRequest(input *CreateTagsInput) (req *request.Request, output *CreateTagsOutput) { op := &request.Operation{ Name: opCreateTags, @@ -484,7 +757,28 @@ func (c *Redshift) CreateTags(input *CreateTagsInput) (*CreateTagsOutput, error) const opDeleteCluster = "DeleteCluster" -// DeleteClusterRequest generates a request for the DeleteCluster operation. +// DeleteClusterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterRequest method. +// req, resp := client.DeleteClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteClusterRequest(input *DeleteClusterInput) (req *request.Request, output *DeleteClusterOutput) { op := &request.Operation{ Name: opDeleteCluster, @@ -527,7 +821,28 @@ func (c *Redshift) DeleteCluster(input *DeleteClusterInput) (*DeleteClusterOutpu const opDeleteClusterParameterGroup = "DeleteClusterParameterGroup" -// DeleteClusterParameterGroupRequest generates a request for the DeleteClusterParameterGroup operation. +// DeleteClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterParameterGroupRequest method. 
+// req, resp := client.DeleteClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteClusterParameterGroupRequest(input *DeleteClusterParameterGroupInput) (req *request.Request, output *DeleteClusterParameterGroupOutput) { op := &request.Operation{ Name: opDeleteClusterParameterGroup, @@ -557,7 +872,28 @@ func (c *Redshift) DeleteClusterParameterGroup(input *DeleteClusterParameterGrou const opDeleteClusterSecurityGroup = "DeleteClusterSecurityGroup" -// DeleteClusterSecurityGroupRequest generates a request for the DeleteClusterSecurityGroup operation. +// DeleteClusterSecurityGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClusterSecurityGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClusterSecurityGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterSecurityGroupRequest method. +// req, resp := client.DeleteClusterSecurityGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteClusterSecurityGroupRequest(input *DeleteClusterSecurityGroupInput) (req *request.Request, output *DeleteClusterSecurityGroupOutput) { op := &request.Operation{ Name: opDeleteClusterSecurityGroup, @@ -591,7 +927,28 @@ func (c *Redshift) DeleteClusterSecurityGroup(input *DeleteClusterSecurityGroupI const opDeleteClusterSnapshot = "DeleteClusterSnapshot" -// DeleteClusterSnapshotRequest generates a request for the DeleteClusterSnapshot operation. +// DeleteClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterSnapshotRequest method. 
+// req, resp := client.DeleteClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteClusterSnapshotRequest(input *DeleteClusterSnapshotInput) (req *request.Request, output *DeleteClusterSnapshotOutput) { op := &request.Operation{ Name: opDeleteClusterSnapshot, @@ -625,7 +982,28 @@ func (c *Redshift) DeleteClusterSnapshot(input *DeleteClusterSnapshotInput) (*De const opDeleteClusterSubnetGroup = "DeleteClusterSubnetGroup" -// DeleteClusterSubnetGroupRequest generates a request for the DeleteClusterSubnetGroup operation. +// DeleteClusterSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the DeleteClusterSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteClusterSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteClusterSubnetGroupRequest method. +// req, resp := client.DeleteClusterSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteClusterSubnetGroupRequest(input *DeleteClusterSubnetGroupInput) (req *request.Request, output *DeleteClusterSubnetGroupOutput) { op := &request.Operation{ Name: opDeleteClusterSubnetGroup, @@ -654,7 +1032,28 @@ func (c *Redshift) DeleteClusterSubnetGroup(input *DeleteClusterSubnetGroupInput const opDeleteEventSubscription = "DeleteEventSubscription" -// DeleteEventSubscriptionRequest generates a request for the DeleteEventSubscription operation. +// DeleteEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEventSubscriptionRequest method. 
+// req, resp := client.DeleteEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteEventSubscriptionRequest(input *DeleteEventSubscriptionInput) (req *request.Request, output *DeleteEventSubscriptionOutput) { op := &request.Operation{ Name: opDeleteEventSubscription, @@ -683,7 +1082,28 @@ func (c *Redshift) DeleteEventSubscription(input *DeleteEventSubscriptionInput) const opDeleteHsmClientCertificate = "DeleteHsmClientCertificate" -// DeleteHsmClientCertificateRequest generates a request for the DeleteHsmClientCertificate operation. +// DeleteHsmClientCertificateRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHsmClientCertificate operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHsmClientCertificate method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHsmClientCertificateRequest method. +// req, resp := client.DeleteHsmClientCertificateRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteHsmClientCertificateRequest(input *DeleteHsmClientCertificateInput) (req *request.Request, output *DeleteHsmClientCertificateOutput) { op := &request.Operation{ Name: opDeleteHsmClientCertificate, @@ -712,7 +1132,28 @@ func (c *Redshift) DeleteHsmClientCertificate(input *DeleteHsmClientCertificateI const opDeleteHsmConfiguration = "DeleteHsmConfiguration" -// DeleteHsmConfigurationRequest generates a request for the DeleteHsmConfiguration operation. +// DeleteHsmConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHsmConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHsmConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHsmConfigurationRequest method. 
+// req, resp := client.DeleteHsmConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteHsmConfigurationRequest(input *DeleteHsmConfigurationInput) (req *request.Request, output *DeleteHsmConfigurationOutput) { op := &request.Operation{ Name: opDeleteHsmConfiguration, @@ -741,7 +1182,28 @@ func (c *Redshift) DeleteHsmConfiguration(input *DeleteHsmConfigurationInput) (* const opDeleteSnapshotCopyGrant = "DeleteSnapshotCopyGrant" -// DeleteSnapshotCopyGrantRequest generates a request for the DeleteSnapshotCopyGrant operation. +// DeleteSnapshotCopyGrantRequest generates a "aws/request.Request" representing the +// client's request for the DeleteSnapshotCopyGrant operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteSnapshotCopyGrant method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteSnapshotCopyGrantRequest method. +// req, resp := client.DeleteSnapshotCopyGrantRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteSnapshotCopyGrantRequest(input *DeleteSnapshotCopyGrantInput) (req *request.Request, output *DeleteSnapshotCopyGrantOutput) { op := &request.Operation{ Name: opDeleteSnapshotCopyGrant, @@ -770,7 +1232,28 @@ func (c *Redshift) DeleteSnapshotCopyGrant(input *DeleteSnapshotCopyGrantInput) const opDeleteTags = "DeleteTags" -// DeleteTagsRequest generates a request for the DeleteTags operation. +// DeleteTagsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTagsRequest method. +// req, resp := client.DeleteTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DeleteTagsRequest(input *DeleteTagsInput) (req *request.Request, output *DeleteTagsOutput) { op := &request.Operation{ Name: opDeleteTags, @@ -800,7 +1283,28 @@ func (c *Redshift) DeleteTags(input *DeleteTagsInput) (*DeleteTagsOutput, error) const opDescribeClusterParameterGroups = "DescribeClusterParameterGroups" -// DescribeClusterParameterGroupsRequest generates a request for the DescribeClusterParameterGroups operation. 
+// DescribeClusterParameterGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterParameterGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterParameterGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterParameterGroupsRequest method. +// req, resp := client.DescribeClusterParameterGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterParameterGroupsRequest(input *DescribeClusterParameterGroupsInput) (req *request.Request, output *DescribeClusterParameterGroupsOutput) { op := &request.Operation{ Name: opDescribeClusterParameterGroups, @@ -849,6 +1353,23 @@ func (c *Redshift) DescribeClusterParameterGroups(input *DescribeClusterParamete return out, err } +// DescribeClusterParameterGroupsPages iterates over the pages of a DescribeClusterParameterGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterParameterGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterParameterGroups operation. +// pageNum := 0 +// err := client.DescribeClusterParameterGroupsPages(params, +// func(page *DescribeClusterParameterGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterParameterGroupsPages(input *DescribeClusterParameterGroupsInput, fn func(p *DescribeClusterParameterGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterParameterGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -859,7 +1380,28 @@ func (c *Redshift) DescribeClusterParameterGroupsPages(input *DescribeClusterPar const opDescribeClusterParameters = "DescribeClusterParameters" -// DescribeClusterParametersRequest generates a request for the DescribeClusterParameters operation. +// DescribeClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DescribeClusterParametersRequest method. +// req, resp := client.DescribeClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterParametersRequest(input *DescribeClusterParametersInput) (req *request.Request, output *DescribeClusterParametersOutput) { op := &request.Operation{ Name: opDescribeClusterParameters, @@ -901,6 +1443,23 @@ func (c *Redshift) DescribeClusterParameters(input *DescribeClusterParametersInp return out, err } +// DescribeClusterParametersPages iterates over the pages of a DescribeClusterParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterParameters operation. +// pageNum := 0 +// err := client.DescribeClusterParametersPages(params, +// func(page *DescribeClusterParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterParametersPages(input *DescribeClusterParametersInput, fn func(p *DescribeClusterParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -911,7 +1470,28 @@ func (c *Redshift) DescribeClusterParametersPages(input *DescribeClusterParamete const opDescribeClusterSecurityGroups = "DescribeClusterSecurityGroups" -// DescribeClusterSecurityGroupsRequest generates a request for the DescribeClusterSecurityGroups operation. +// DescribeClusterSecurityGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterSecurityGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterSecurityGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterSecurityGroupsRequest method. +// req, resp := client.DescribeClusterSecurityGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterSecurityGroupsRequest(input *DescribeClusterSecurityGroupsInput) (req *request.Request, output *DescribeClusterSecurityGroupsOutput) { op := &request.Operation{ Name: opDescribeClusterSecurityGroups, @@ -958,6 +1538,23 @@ func (c *Redshift) DescribeClusterSecurityGroups(input *DescribeClusterSecurityG return out, err } +// DescribeClusterSecurityGroupsPages iterates over the pages of a DescribeClusterSecurityGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
+// +// See DescribeClusterSecurityGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterSecurityGroups operation. +// pageNum := 0 +// err := client.DescribeClusterSecurityGroupsPages(params, +// func(page *DescribeClusterSecurityGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterSecurityGroupsPages(input *DescribeClusterSecurityGroupsInput, fn func(p *DescribeClusterSecurityGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterSecurityGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -968,7 +1565,28 @@ func (c *Redshift) DescribeClusterSecurityGroupsPages(input *DescribeClusterSecu const opDescribeClusterSnapshots = "DescribeClusterSnapshots" -// DescribeClusterSnapshotsRequest generates a request for the DescribeClusterSnapshots operation. +// DescribeClusterSnapshotsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterSnapshots operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterSnapshots method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterSnapshotsRequest method. +// req, resp := client.DescribeClusterSnapshotsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterSnapshotsRequest(input *DescribeClusterSnapshotsInput) (req *request.Request, output *DescribeClusterSnapshotsOutput) { op := &request.Operation{ Name: opDescribeClusterSnapshots, @@ -1014,6 +1632,23 @@ func (c *Redshift) DescribeClusterSnapshots(input *DescribeClusterSnapshotsInput return out, err } +// DescribeClusterSnapshotsPages iterates over the pages of a DescribeClusterSnapshots operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterSnapshots method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterSnapshots operation. 
+// pageNum := 0 +// err := client.DescribeClusterSnapshotsPages(params, +// func(page *DescribeClusterSnapshotsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterSnapshotsPages(input *DescribeClusterSnapshotsInput, fn func(p *DescribeClusterSnapshotsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterSnapshotsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1024,7 +1659,28 @@ func (c *Redshift) DescribeClusterSnapshotsPages(input *DescribeClusterSnapshots const opDescribeClusterSubnetGroups = "DescribeClusterSubnetGroups" -// DescribeClusterSubnetGroupsRequest generates a request for the DescribeClusterSubnetGroups operation. +// DescribeClusterSubnetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterSubnetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterSubnetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterSubnetGroupsRequest method. +// req, resp := client.DescribeClusterSubnetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterSubnetGroupsRequest(input *DescribeClusterSubnetGroupsInput) (req *request.Request, output *DescribeClusterSubnetGroupsOutput) { op := &request.Operation{ Name: opDescribeClusterSubnetGroups, @@ -1067,6 +1723,23 @@ func (c *Redshift) DescribeClusterSubnetGroups(input *DescribeClusterSubnetGroup return out, err } +// DescribeClusterSubnetGroupsPages iterates over the pages of a DescribeClusterSubnetGroups operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterSubnetGroups method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterSubnetGroups operation. +// pageNum := 0 +// err := client.DescribeClusterSubnetGroupsPages(params, +// func(page *DescribeClusterSubnetGroupsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterSubnetGroupsPages(input *DescribeClusterSubnetGroupsInput, fn func(p *DescribeClusterSubnetGroupsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterSubnetGroupsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1077,7 +1750,28 @@ func (c *Redshift) DescribeClusterSubnetGroupsPages(input *DescribeClusterSubnet const opDescribeClusterVersions = "DescribeClusterVersions" -// DescribeClusterVersionsRequest generates a request for the DescribeClusterVersions operation. 
+// DescribeClusterVersionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusterVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusterVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClusterVersionsRequest method. +// req, resp := client.DescribeClusterVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClusterVersionsRequest(input *DescribeClusterVersionsInput) (req *request.Request, output *DescribeClusterVersionsOutput) { op := &request.Operation{ Name: opDescribeClusterVersions, @@ -1112,6 +1806,23 @@ func (c *Redshift) DescribeClusterVersions(input *DescribeClusterVersionsInput) return out, err } +// DescribeClusterVersionsPages iterates over the pages of a DescribeClusterVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusterVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusterVersions operation. +// pageNum := 0 +// err := client.DescribeClusterVersionsPages(params, +// func(page *DescribeClusterVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClusterVersionsPages(input *DescribeClusterVersionsInput, fn func(p *DescribeClusterVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClusterVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1122,7 +1833,28 @@ func (c *Redshift) DescribeClusterVersionsPages(input *DescribeClusterVersionsIn const opDescribeClusters = "DescribeClusters" -// DescribeClustersRequest generates a request for the DescribeClusters operation. +// DescribeClustersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeClusters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeClusters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeClustersRequest method. 
+// req, resp := client.DescribeClustersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeClustersRequest(input *DescribeClustersInput) (req *request.Request, output *DescribeClustersOutput) { op := &request.Operation{ Name: opDescribeClusters, @@ -1166,6 +1898,23 @@ func (c *Redshift) DescribeClusters(input *DescribeClustersInput) (*DescribeClus return out, err } +// DescribeClustersPages iterates over the pages of a DescribeClusters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeClusters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeClusters operation. +// pageNum := 0 +// err := client.DescribeClustersPages(params, +// func(page *DescribeClustersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeClustersPages(input *DescribeClustersInput, fn func(p *DescribeClustersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeClustersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1176,7 +1925,28 @@ func (c *Redshift) DescribeClustersPages(input *DescribeClustersInput, fn func(p const opDescribeDefaultClusterParameters = "DescribeDefaultClusterParameters" -// DescribeDefaultClusterParametersRequest generates a request for the DescribeDefaultClusterParameters operation. +// DescribeDefaultClusterParametersRequest generates a "aws/request.Request" representing the +// client's request for the DescribeDefaultClusterParameters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeDefaultClusterParameters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeDefaultClusterParametersRequest method. +// req, resp := client.DescribeDefaultClusterParametersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeDefaultClusterParametersRequest(input *DescribeDefaultClusterParametersInput) (req *request.Request, output *DescribeDefaultClusterParametersOutput) { op := &request.Operation{ Name: opDescribeDefaultClusterParameters, @@ -1211,6 +1981,23 @@ func (c *Redshift) DescribeDefaultClusterParameters(input *DescribeDefaultCluste return out, err } +// DescribeDefaultClusterParametersPages iterates over the pages of a DescribeDefaultClusterParameters operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeDefaultClusterParameters method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. 
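The expanded Request doc comments above all describe the same flow: build the request object, optionally attach handlers, then call Send. A hedged sketch of that flow with DescribeClustersRequest follows; it assumes an already-configured *redshift.Redshift client named svc, and the logging handler is purely illustrative.

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/service/redshift"
    )

    func listClusters(svc *redshift.Redshift) error {
    	// Build the request object without sending it.
    	req, resp := svc.DescribeClustersRequest(&redshift.DescribeClustersInput{})

    	// Inject custom logic into the request lifecycle before Send is called.
    	req.Handlers.Send.PushFront(func(r *request.Request) {
    		log.Printf("calling %s %s", r.ClientInfo.ServiceName, r.Operation.Name)
    	})

    	// Nothing goes over the wire until Send; resp is only valid afterwards.
    	if err := req.Send(); err != nil {
    		return err
    	}
    	for _, c := range resp.Clusters {
    		fmt.Println(aws.StringValue(c.ClusterIdentifier))
    	}
    	return nil
    }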
+// +// // Example iterating over at most 3 pages of a DescribeDefaultClusterParameters operation. +// pageNum := 0 +// err := client.DescribeDefaultClusterParametersPages(params, +// func(page *DescribeDefaultClusterParametersOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeDefaultClusterParametersPages(input *DescribeDefaultClusterParametersInput, fn func(p *DescribeDefaultClusterParametersOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeDefaultClusterParametersRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1221,7 +2008,28 @@ func (c *Redshift) DescribeDefaultClusterParametersPages(input *DescribeDefaultC const opDescribeEventCategories = "DescribeEventCategories" -// DescribeEventCategoriesRequest generates a request for the DescribeEventCategories operation. +// DescribeEventCategoriesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventCategories operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventCategories method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventCategoriesRequest method. +// req, resp := client.DescribeEventCategoriesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeEventCategoriesRequest(input *DescribeEventCategoriesInput) (req *request.Request, output *DescribeEventCategoriesOutput) { op := &request.Operation{ Name: opDescribeEventCategories, @@ -1250,7 +2058,28 @@ func (c *Redshift) DescribeEventCategories(input *DescribeEventCategoriesInput) const opDescribeEventSubscriptions = "DescribeEventSubscriptions" -// DescribeEventSubscriptionsRequest generates a request for the DescribeEventSubscriptions operation. +// DescribeEventSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEventSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEventSubscriptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventSubscriptionsRequest method. 
+// req, resp := client.DescribeEventSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeEventSubscriptionsRequest(input *DescribeEventSubscriptionsInput) (req *request.Request, output *DescribeEventSubscriptionsOutput) { op := &request.Operation{ Name: opDescribeEventSubscriptions, @@ -1283,6 +2112,23 @@ func (c *Redshift) DescribeEventSubscriptions(input *DescribeEventSubscriptionsI return out, err } +// DescribeEventSubscriptionsPages iterates over the pages of a DescribeEventSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEventSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEventSubscriptions operation. +// pageNum := 0 +// err := client.DescribeEventSubscriptionsPages(params, +// func(page *DescribeEventSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeEventSubscriptionsPages(input *DescribeEventSubscriptionsInput, fn func(p *DescribeEventSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventSubscriptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1293,7 +2139,28 @@ func (c *Redshift) DescribeEventSubscriptionsPages(input *DescribeEventSubscript const opDescribeEvents = "DescribeEvents" -// DescribeEventsRequest generates a request for the DescribeEvents operation. +// DescribeEventsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeEvents operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeEvents method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeEventsRequest method. +// req, resp := client.DescribeEventsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeEventsRequest(input *DescribeEventsInput) (req *request.Request, output *DescribeEventsOutput) { op := &request.Operation{ Name: opDescribeEvents, @@ -1327,6 +2194,23 @@ func (c *Redshift) DescribeEvents(input *DescribeEventsInput) (*DescribeEventsOu return out, err } +// DescribeEventsPages iterates over the pages of a DescribeEvents operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeEvents method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeEvents operation. 
+// pageNum := 0 +// err := client.DescribeEventsPages(params, +// func(page *DescribeEventsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeEventsPages(input *DescribeEventsInput, fn func(p *DescribeEventsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeEventsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1337,7 +2221,28 @@ func (c *Redshift) DescribeEventsPages(input *DescribeEventsInput, fn func(p *De const opDescribeHsmClientCertificates = "DescribeHsmClientCertificates" -// DescribeHsmClientCertificatesRequest generates a request for the DescribeHsmClientCertificates operation. +// DescribeHsmClientCertificatesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHsmClientCertificates operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHsmClientCertificates method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHsmClientCertificatesRequest method. +// req, resp := client.DescribeHsmClientCertificatesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeHsmClientCertificatesRequest(input *DescribeHsmClientCertificatesInput) (req *request.Request, output *DescribeHsmClientCertificatesOutput) { op := &request.Operation{ Name: opDescribeHsmClientCertificates, @@ -1380,6 +2285,23 @@ func (c *Redshift) DescribeHsmClientCertificates(input *DescribeHsmClientCertifi return out, err } +// DescribeHsmClientCertificatesPages iterates over the pages of a DescribeHsmClientCertificates operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHsmClientCertificates method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHsmClientCertificates operation. +// pageNum := 0 +// err := client.DescribeHsmClientCertificatesPages(params, +// func(page *DescribeHsmClientCertificatesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeHsmClientCertificatesPages(input *DescribeHsmClientCertificatesInput, fn func(p *DescribeHsmClientCertificatesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeHsmClientCertificatesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1390,7 +2312,28 @@ func (c *Redshift) DescribeHsmClientCertificatesPages(input *DescribeHsmClientCe const opDescribeHsmConfigurations = "DescribeHsmConfigurations" -// DescribeHsmConfigurationsRequest generates a request for the DescribeHsmConfigurations operation. 
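Each generated Pages body above pushes request.MakeAddToUserAgentFreeFormHandler("Paginator") onto the Build handlers, which appends a free-form token to the request's User-Agent header so paginated calls are identifiable. As a rough sketch, the same helper can be attached to an ordinary request; the "my-tool/1.0" token and the svc client are assumptions for illustration.

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws/request"
    	"github.com/aws/aws-sdk-go/service/redshift"
    )

    func describeEventsTagged(svc *redshift.Redshift) error {
    	req, out := svc.DescribeEventsRequest(&redshift.DescribeEventsInput{})
    	// Same helper the generated paginators use: append a free-form
    	// token to the User-Agent header when the request is built.
    	req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("my-tool/1.0"))
    	if err := req.Send(); err != nil {
    		return err
    	}
    	log.Printf("got %d events", len(out.Events))
    	return nil
    }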
+// DescribeHsmConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeHsmConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeHsmConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeHsmConfigurationsRequest method. +// req, resp := client.DescribeHsmConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeHsmConfigurationsRequest(input *DescribeHsmConfigurationsInput) (req *request.Request, output *DescribeHsmConfigurationsOutput) { op := &request.Operation{ Name: opDescribeHsmConfigurations, @@ -1433,6 +2376,23 @@ func (c *Redshift) DescribeHsmConfigurations(input *DescribeHsmConfigurationsInp return out, err } +// DescribeHsmConfigurationsPages iterates over the pages of a DescribeHsmConfigurations operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeHsmConfigurations method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeHsmConfigurations operation. +// pageNum := 0 +// err := client.DescribeHsmConfigurationsPages(params, +// func(page *DescribeHsmConfigurationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeHsmConfigurationsPages(input *DescribeHsmConfigurationsInput, fn func(p *DescribeHsmConfigurationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeHsmConfigurationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1443,7 +2403,28 @@ func (c *Redshift) DescribeHsmConfigurationsPages(input *DescribeHsmConfiguratio const opDescribeLoggingStatus = "DescribeLoggingStatus" -// DescribeLoggingStatusRequest generates a request for the DescribeLoggingStatus operation. +// DescribeLoggingStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoggingStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoggingStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoggingStatusRequest method. 
+// req, resp := client.DescribeLoggingStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeLoggingStatusRequest(input *DescribeLoggingStatusInput) (req *request.Request, output *LoggingStatus) { op := &request.Operation{ Name: opDescribeLoggingStatus, @@ -1471,7 +2452,28 @@ func (c *Redshift) DescribeLoggingStatus(input *DescribeLoggingStatusInput) (*Lo const opDescribeOrderableClusterOptions = "DescribeOrderableClusterOptions" -// DescribeOrderableClusterOptionsRequest generates a request for the DescribeOrderableClusterOptions operation. +// DescribeOrderableClusterOptionsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeOrderableClusterOptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeOrderableClusterOptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeOrderableClusterOptionsRequest method. +// req, resp := client.DescribeOrderableClusterOptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeOrderableClusterOptionsRequest(input *DescribeOrderableClusterOptionsInput) (req *request.Request, output *DescribeOrderableClusterOptionsOutput) { op := &request.Operation{ Name: opDescribeOrderableClusterOptions, @@ -1510,6 +2512,23 @@ func (c *Redshift) DescribeOrderableClusterOptions(input *DescribeOrderableClust return out, err } +// DescribeOrderableClusterOptionsPages iterates over the pages of a DescribeOrderableClusterOptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeOrderableClusterOptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeOrderableClusterOptions operation. +// pageNum := 0 +// err := client.DescribeOrderableClusterOptionsPages(params, +// func(page *DescribeOrderableClusterOptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeOrderableClusterOptionsPages(input *DescribeOrderableClusterOptionsInput, fn func(p *DescribeOrderableClusterOptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeOrderableClusterOptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1520,7 +2539,28 @@ func (c *Redshift) DescribeOrderableClusterOptionsPages(input *DescribeOrderable const opDescribeReservedNodeOfferings = "DescribeReservedNodeOfferings" -// DescribeReservedNodeOfferingsRequest generates a request for the DescribeReservedNodeOfferings operation. 
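As the comments note, the request-object form is only needed when you want to customize the request lifecycle; for a plain call, the operation method can be invoked directly. A brief sketch with DescribeLoggingStatus, assuming an existing svc client; the cluster identifier is a placeholder.

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/redshift"
    )

    func showLoggingStatus(svc *redshift.Redshift) error {
    	out, err := svc.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{
    		ClusterIdentifier: aws.String("example-cluster"), // placeholder identifier
    	})
    	if err != nil {
    		return err
    	}
    	fmt.Printf("logging enabled: %t, bucket: %s\n",
    		aws.BoolValue(out.LoggingEnabled), aws.StringValue(out.BucketName))
    	return nil
    }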
+// DescribeReservedNodeOfferingsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedNodeOfferings operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedNodeOfferings method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedNodeOfferingsRequest method. +// req, resp := client.DescribeReservedNodeOfferingsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeReservedNodeOfferingsRequest(input *DescribeReservedNodeOfferingsInput) (req *request.Request, output *DescribeReservedNodeOfferingsOutput) { op := &request.Operation{ Name: opDescribeReservedNodeOfferings, @@ -1560,6 +2600,23 @@ func (c *Redshift) DescribeReservedNodeOfferings(input *DescribeReservedNodeOffe return out, err } +// DescribeReservedNodeOfferingsPages iterates over the pages of a DescribeReservedNodeOfferings operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedNodeOfferings method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedNodeOfferings operation. +// pageNum := 0 +// err := client.DescribeReservedNodeOfferingsPages(params, +// func(page *DescribeReservedNodeOfferingsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeReservedNodeOfferingsPages(input *DescribeReservedNodeOfferingsInput, fn func(p *DescribeReservedNodeOfferingsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedNodeOfferingsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1570,7 +2627,28 @@ func (c *Redshift) DescribeReservedNodeOfferingsPages(input *DescribeReservedNod const opDescribeReservedNodes = "DescribeReservedNodes" -// DescribeReservedNodesRequest generates a request for the DescribeReservedNodes operation. +// DescribeReservedNodesRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReservedNodes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReservedNodes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReservedNodesRequest method. 
+// req, resp := client.DescribeReservedNodesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeReservedNodesRequest(input *DescribeReservedNodesInput) (req *request.Request, output *DescribeReservedNodesOutput) { op := &request.Operation{ Name: opDescribeReservedNodes, @@ -1601,6 +2679,23 @@ func (c *Redshift) DescribeReservedNodes(input *DescribeReservedNodesInput) (*De return out, err } +// DescribeReservedNodesPages iterates over the pages of a DescribeReservedNodes operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See DescribeReservedNodes method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a DescribeReservedNodes operation. +// pageNum := 0 +// err := client.DescribeReservedNodesPages(params, +// func(page *DescribeReservedNodesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Redshift) DescribeReservedNodesPages(input *DescribeReservedNodesInput, fn func(p *DescribeReservedNodesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.DescribeReservedNodesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1611,7 +2706,28 @@ func (c *Redshift) DescribeReservedNodesPages(input *DescribeReservedNodesInput, const opDescribeResize = "DescribeResize" -// DescribeResizeRequest generates a request for the DescribeResize operation. +// DescribeResizeRequest generates a "aws/request.Request" representing the +// client's request for the DescribeResize operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeResize method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeResizeRequest method. +// req, resp := client.DescribeResizeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeResizeRequest(input *DescribeResizeInput) (req *request.Request, output *DescribeResizeOutput) { op := &request.Operation{ Name: opDescribeResize, @@ -1644,7 +2760,28 @@ func (c *Redshift) DescribeResize(input *DescribeResizeInput) (*DescribeResizeOu const opDescribeSnapshotCopyGrants = "DescribeSnapshotCopyGrants" -// DescribeSnapshotCopyGrantsRequest generates a request for the DescribeSnapshotCopyGrants operation. +// DescribeSnapshotCopyGrantsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeSnapshotCopyGrants operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeSnapshotCopyGrants method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeSnapshotCopyGrantsRequest method. +// req, resp := client.DescribeSnapshotCopyGrantsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeSnapshotCopyGrantsRequest(input *DescribeSnapshotCopyGrantsInput) (req *request.Request, output *DescribeSnapshotCopyGrantsOutput) { op := &request.Operation{ Name: opDescribeSnapshotCopyGrants, @@ -1676,7 +2813,28 @@ func (c *Redshift) DescribeSnapshotCopyGrants(input *DescribeSnapshotCopyGrantsI const opDescribeTableRestoreStatus = "DescribeTableRestoreStatus" -// DescribeTableRestoreStatusRequest generates a request for the DescribeTableRestoreStatus operation. +// DescribeTableRestoreStatusRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTableRestoreStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTableRestoreStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTableRestoreStatusRequest method. +// req, resp := client.DescribeTableRestoreStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeTableRestoreStatusRequest(input *DescribeTableRestoreStatusInput) (req *request.Request, output *DescribeTableRestoreStatusOutput) { op := &request.Operation{ Name: opDescribeTableRestoreStatus, @@ -1707,7 +2865,28 @@ func (c *Redshift) DescribeTableRestoreStatus(input *DescribeTableRestoreStatusI const opDescribeTags = "DescribeTags" -// DescribeTagsRequest generates a request for the DescribeTags operation. +// DescribeTagsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeTagsRequest method. 
+// req, resp := client.DescribeTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DescribeTagsRequest(input *DescribeTagsInput) (req *request.Request, output *DescribeTagsOutput) { op := &request.Operation{ Name: opDescribeTags, @@ -1752,7 +2931,28 @@ func (c *Redshift) DescribeTags(input *DescribeTagsInput) (*DescribeTagsOutput, const opDisableLogging = "DisableLogging" -// DisableLoggingRequest generates a request for the DisableLogging operation. +// DisableLoggingRequest generates a "aws/request.Request" representing the +// client's request for the DisableLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableLoggingRequest method. +// req, resp := client.DisableLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DisableLoggingRequest(input *DisableLoggingInput) (req *request.Request, output *LoggingStatus) { op := &request.Operation{ Name: opDisableLogging, @@ -1780,7 +2980,28 @@ func (c *Redshift) DisableLogging(input *DisableLoggingInput) (*LoggingStatus, e const opDisableSnapshotCopy = "DisableSnapshotCopy" -// DisableSnapshotCopyRequest generates a request for the DisableSnapshotCopy operation. +// DisableSnapshotCopyRequest generates a "aws/request.Request" representing the +// client's request for the DisableSnapshotCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisableSnapshotCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisableSnapshotCopyRequest method. +// req, resp := client.DisableSnapshotCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) DisableSnapshotCopyRequest(input *DisableSnapshotCopyInput) (req *request.Request, output *DisableSnapshotCopyOutput) { op := &request.Operation{ Name: opDisableSnapshotCopy, @@ -1812,7 +3033,28 @@ func (c *Redshift) DisableSnapshotCopy(input *DisableSnapshotCopyInput) (*Disabl const opEnableLogging = "EnableLogging" -// EnableLoggingRequest generates a request for the EnableLogging operation. +// EnableLoggingRequest generates a "aws/request.Request" representing the +// client's request for the EnableLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableLoggingRequest method. +// req, resp := client.EnableLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) EnableLoggingRequest(input *EnableLoggingInput) (req *request.Request, output *LoggingStatus) { op := &request.Operation{ Name: opEnableLogging, @@ -1840,7 +3082,28 @@ func (c *Redshift) EnableLogging(input *EnableLoggingInput) (*LoggingStatus, err const opEnableSnapshotCopy = "EnableSnapshotCopy" -// EnableSnapshotCopyRequest generates a request for the EnableSnapshotCopy operation. +// EnableSnapshotCopyRequest generates a "aws/request.Request" representing the +// client's request for the EnableSnapshotCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the EnableSnapshotCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the EnableSnapshotCopyRequest method. +// req, resp := client.EnableSnapshotCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) EnableSnapshotCopyRequest(input *EnableSnapshotCopyInput) (req *request.Request, output *EnableSnapshotCopyOutput) { op := &request.Operation{ Name: opEnableSnapshotCopy, @@ -1868,7 +3131,28 @@ func (c *Redshift) EnableSnapshotCopy(input *EnableSnapshotCopyInput) (*EnableSn const opModifyCluster = "ModifyCluster" -// ModifyClusterRequest generates a request for the ModifyCluster operation. +// ModifyClusterRequest generates a "aws/request.Request" representing the +// client's request for the ModifyCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterRequest method. 
+// req, resp := client.ModifyClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifyClusterRequest(input *ModifyClusterInput) (req *request.Request, output *ModifyClusterOutput) { op := &request.Operation{ Name: opModifyCluster, @@ -1905,7 +3189,28 @@ func (c *Redshift) ModifyCluster(input *ModifyClusterInput) (*ModifyClusterOutpu const opModifyClusterIamRoles = "ModifyClusterIamRoles" -// ModifyClusterIamRolesRequest generates a request for the ModifyClusterIamRoles operation. +// ModifyClusterIamRolesRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClusterIamRoles operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyClusterIamRoles method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterIamRolesRequest method. +// req, resp := client.ModifyClusterIamRolesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifyClusterIamRolesRequest(input *ModifyClusterIamRolesInput) (req *request.Request, output *ModifyClusterIamRolesOutput) { op := &request.Operation{ Name: opModifyClusterIamRoles, @@ -1935,7 +3240,28 @@ func (c *Redshift) ModifyClusterIamRoles(input *ModifyClusterIamRolesInput) (*Mo const opModifyClusterParameterGroup = "ModifyClusterParameterGroup" -// ModifyClusterParameterGroupRequest generates a request for the ModifyClusterParameterGroup operation. +// ModifyClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterParameterGroupRequest method. +// req, resp := client.ModifyClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifyClusterParameterGroupRequest(input *ModifyClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { op := &request.Operation{ Name: opModifyClusterParameterGroup, @@ -1966,7 +3292,28 @@ func (c *Redshift) ModifyClusterParameterGroup(input *ModifyClusterParameterGrou const opModifyClusterSubnetGroup = "ModifyClusterSubnetGroup" -// ModifyClusterSubnetGroupRequest generates a request for the ModifyClusterSubnetGroup operation. 
+// ModifyClusterSubnetGroupRequest generates a "aws/request.Request" representing the +// client's request for the ModifyClusterSubnetGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyClusterSubnetGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyClusterSubnetGroupRequest method. +// req, resp := client.ModifyClusterSubnetGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifyClusterSubnetGroupRequest(input *ModifyClusterSubnetGroupInput) (req *request.Request, output *ModifyClusterSubnetGroupOutput) { op := &request.Operation{ Name: opModifyClusterSubnetGroup, @@ -1995,7 +3342,28 @@ func (c *Redshift) ModifyClusterSubnetGroup(input *ModifyClusterSubnetGroupInput const opModifyEventSubscription = "ModifyEventSubscription" -// ModifyEventSubscriptionRequest generates a request for the ModifyEventSubscription operation. +// ModifyEventSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ModifyEventSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ModifyEventSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifyEventSubscriptionRequest method. +// req, resp := client.ModifyEventSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifyEventSubscriptionRequest(input *ModifyEventSubscriptionInput) (req *request.Request, output *ModifyEventSubscriptionOutput) { op := &request.Operation{ Name: opModifyEventSubscription, @@ -2022,7 +3390,28 @@ func (c *Redshift) ModifyEventSubscription(input *ModifyEventSubscriptionInput) const opModifySnapshotCopyRetentionPeriod = "ModifySnapshotCopyRetentionPeriod" -// ModifySnapshotCopyRetentionPeriodRequest generates a request for the ModifySnapshotCopyRetentionPeriod operation. +// ModifySnapshotCopyRetentionPeriodRequest generates a "aws/request.Request" representing the +// client's request for the ModifySnapshotCopyRetentionPeriod operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ModifySnapshotCopyRetentionPeriod method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ModifySnapshotCopyRetentionPeriodRequest method. +// req, resp := client.ModifySnapshotCopyRetentionPeriodRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ModifySnapshotCopyRetentionPeriodRequest(input *ModifySnapshotCopyRetentionPeriodInput) (req *request.Request, output *ModifySnapshotCopyRetentionPeriodOutput) { op := &request.Operation{ Name: opModifySnapshotCopyRetentionPeriod, @@ -2050,7 +3439,28 @@ func (c *Redshift) ModifySnapshotCopyRetentionPeriod(input *ModifySnapshotCopyRe const opPurchaseReservedNodeOffering = "PurchaseReservedNodeOffering" -// PurchaseReservedNodeOfferingRequest generates a request for the PurchaseReservedNodeOffering operation. +// PurchaseReservedNodeOfferingRequest generates a "aws/request.Request" representing the +// client's request for the PurchaseReservedNodeOffering operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurchaseReservedNodeOffering method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurchaseReservedNodeOfferingRequest method. +// req, resp := client.PurchaseReservedNodeOfferingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) PurchaseReservedNodeOfferingRequest(input *PurchaseReservedNodeOfferingInput) (req *request.Request, output *PurchaseReservedNodeOfferingOutput) { op := &request.Operation{ Name: opPurchaseReservedNodeOffering, @@ -2085,7 +3495,28 @@ func (c *Redshift) PurchaseReservedNodeOffering(input *PurchaseReservedNodeOffer const opRebootCluster = "RebootCluster" -// RebootClusterRequest generates a request for the RebootCluster operation. +// RebootClusterRequest generates a "aws/request.Request" representing the +// client's request for the RebootCluster operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RebootCluster method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RebootClusterRequest method. 
+// req, resp := client.RebootClusterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RebootClusterRequest(input *RebootClusterInput) (req *request.Request, output *RebootClusterOutput) { op := &request.Operation{ Name: opRebootCluster, @@ -2118,7 +3549,28 @@ func (c *Redshift) RebootCluster(input *RebootClusterInput) (*RebootClusterOutpu const opResetClusterParameterGroup = "ResetClusterParameterGroup" -// ResetClusterParameterGroupRequest generates a request for the ResetClusterParameterGroup operation. +// ResetClusterParameterGroupRequest generates a "aws/request.Request" representing the +// client's request for the ResetClusterParameterGroup operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ResetClusterParameterGroup method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ResetClusterParameterGroupRequest method. +// req, resp := client.ResetClusterParameterGroupRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) ResetClusterParameterGroupRequest(input *ResetClusterParameterGroupInput) (req *request.Request, output *ClusterParameterGroupNameMessage) { op := &request.Operation{ Name: opResetClusterParameterGroup, @@ -2148,7 +3600,28 @@ func (c *Redshift) ResetClusterParameterGroup(input *ResetClusterParameterGroupI const opRestoreFromClusterSnapshot = "RestoreFromClusterSnapshot" -// RestoreFromClusterSnapshotRequest generates a request for the RestoreFromClusterSnapshot operation. +// RestoreFromClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreFromClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreFromClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreFromClusterSnapshotRequest method. 
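The generated examples only check whether err is nil; in practice the returned error can also be inspected as an awserr.Error to branch on the service's error code. A hedged sketch around RebootCluster follows, assuming an existing svc client; the cluster identifier is a placeholder and the logged code/message values depend on the service response.

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/awserr"
    	"github.com/aws/aws-sdk-go/service/redshift"
    )

    func rebootCluster(svc *redshift.Redshift, id string) error {
    	_, err := svc.RebootCluster(&redshift.RebootClusterInput{
    		ClusterIdentifier: aws.String(id),
    	})
    	if err != nil {
    		// Service errors implement awserr.Error, exposing the service's
    		// error code and message separately from the wrapped error.
    		if aerr, ok := err.(awserr.Error); ok {
    			log.Printf("RebootCluster failed: code=%s message=%s", aerr.Code(), aerr.Message())
    		}
    		return err
    	}
    	return nil
    }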
+// req, resp := client.RestoreFromClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RestoreFromClusterSnapshotRequest(input *RestoreFromClusterSnapshotInput) (req *request.Request, output *RestoreFromClusterSnapshotOutput) { op := &request.Operation{ Name: opRestoreFromClusterSnapshot, @@ -2189,7 +3662,28 @@ func (c *Redshift) RestoreFromClusterSnapshot(input *RestoreFromClusterSnapshotI const opRestoreTableFromClusterSnapshot = "RestoreTableFromClusterSnapshot" -// RestoreTableFromClusterSnapshotRequest generates a request for the RestoreTableFromClusterSnapshot operation. +// RestoreTableFromClusterSnapshotRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromClusterSnapshot operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreTableFromClusterSnapshot method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreTableFromClusterSnapshotRequest method. +// req, resp := client.RestoreTableFromClusterSnapshotRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RestoreTableFromClusterSnapshotRequest(input *RestoreTableFromClusterSnapshotInput) (req *request.Request, output *RestoreTableFromClusterSnapshotOutput) { op := &request.Operation{ Name: opRestoreTableFromClusterSnapshot, @@ -2228,7 +3722,28 @@ func (c *Redshift) RestoreTableFromClusterSnapshot(input *RestoreTableFromCluste const opRevokeClusterSecurityGroupIngress = "RevokeClusterSecurityGroupIngress" -// RevokeClusterSecurityGroupIngressRequest generates a request for the RevokeClusterSecurityGroupIngress operation. +// RevokeClusterSecurityGroupIngressRequest generates a "aws/request.Request" representing the +// client's request for the RevokeClusterSecurityGroupIngress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeClusterSecurityGroupIngress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeClusterSecurityGroupIngressRequest method. 
+// req, resp := client.RevokeClusterSecurityGroupIngressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RevokeClusterSecurityGroupIngressRequest(input *RevokeClusterSecurityGroupIngressInput) (req *request.Request, output *RevokeClusterSecurityGroupIngressOutput) { op := &request.Operation{ Name: opRevokeClusterSecurityGroupIngress, @@ -2259,7 +3774,28 @@ func (c *Redshift) RevokeClusterSecurityGroupIngress(input *RevokeClusterSecurit const opRevokeSnapshotAccess = "RevokeSnapshotAccess" -// RevokeSnapshotAccessRequest generates a request for the RevokeSnapshotAccess operation. +// RevokeSnapshotAccessRequest generates a "aws/request.Request" representing the +// client's request for the RevokeSnapshotAccess operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RevokeSnapshotAccess method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RevokeSnapshotAccessRequest method. +// req, resp := client.RevokeSnapshotAccessRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RevokeSnapshotAccessRequest(input *RevokeSnapshotAccessInput) (req *request.Request, output *RevokeSnapshotAccessOutput) { op := &request.Operation{ Name: opRevokeSnapshotAccess, @@ -2292,7 +3828,28 @@ func (c *Redshift) RevokeSnapshotAccess(input *RevokeSnapshotAccessInput) (*Revo const opRotateEncryptionKey = "RotateEncryptionKey" -// RotateEncryptionKeyRequest generates a request for the RotateEncryptionKey operation. +// RotateEncryptionKeyRequest generates a "aws/request.Request" representing the +// client's request for the RotateEncryptionKey operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RotateEncryptionKey method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RotateEncryptionKeyRequest method. 
+// req, resp := client.RotateEncryptionKeyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Redshift) RotateEncryptionKeyRequest(input *RotateEncryptionKeyInput) (req *request.Request, output *RotateEncryptionKeyOutput) { op := &request.Operation{ Name: opRotateEncryptionKey, diff --git a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go index c25dff23b..f2870625d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/redshift/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Overview This is an interface reference for Amazon Redshift. It contains @@ -79,7 +79,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go index f0c313329..5a07be987 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/api.go @@ -13,7 +13,28 @@ import ( const opAssociateVPCWithHostedZone = "AssociateVPCWithHostedZone" -// AssociateVPCWithHostedZoneRequest generates a request for the AssociateVPCWithHostedZone operation. +// AssociateVPCWithHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the AssociateVPCWithHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssociateVPCWithHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssociateVPCWithHostedZoneRequest method. +// req, resp := client.AssociateVPCWithHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) AssociateVPCWithHostedZoneRequest(input *AssociateVPCWithHostedZoneInput) (req *request.Request, output *AssociateVPCWithHostedZoneOutput) { op := &request.Operation{ Name: opAssociateVPCWithHostedZone, @@ -47,7 +68,28 @@ func (c *Route53) AssociateVPCWithHostedZone(input *AssociateVPCWithHostedZoneIn const opChangeResourceRecordSets = "ChangeResourceRecordSets" -// ChangeResourceRecordSetsRequest generates a request for the ChangeResourceRecordSets operation. +// ChangeResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ChangeResourceRecordSets operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeResourceRecordSetsRequest method. +// req, resp := client.ChangeResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ChangeResourceRecordSetsRequest(input *ChangeResourceRecordSetsInput) (req *request.Request, output *ChangeResourceRecordSetsOutput) { op := &request.Operation{ Name: opChangeResourceRecordSets, @@ -98,7 +140,28 @@ func (c *Route53) ChangeResourceRecordSets(input *ChangeResourceRecordSetsInput) const opChangeTagsForResource = "ChangeTagsForResource" -// ChangeTagsForResourceRequest generates a request for the ChangeTagsForResource operation. +// ChangeTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ChangeTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeTagsForResourceRequest method. +// req, resp := client.ChangeTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ChangeTagsForResourceRequest(input *ChangeTagsForResourceInput) (req *request.Request, output *ChangeTagsForResourceOutput) { op := &request.Operation{ Name: opChangeTagsForResource, @@ -124,7 +187,28 @@ func (c *Route53) ChangeTagsForResource(input *ChangeTagsForResourceInput) (*Cha const opCreateHealthCheck = "CreateHealthCheck" -// CreateHealthCheckRequest generates a request for the CreateHealthCheck operation. +// CreateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the CreateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHealthCheckRequest method. 
+// req, resp := client.CreateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateHealthCheckRequest(input *CreateHealthCheckInput) (req *request.Request, output *CreateHealthCheckOutput) { op := &request.Operation{ Name: opCreateHealthCheck, @@ -156,7 +240,28 @@ func (c *Route53) CreateHealthCheck(input *CreateHealthCheckInput) (*CreateHealt const opCreateHostedZone = "CreateHostedZone" -// CreateHostedZoneRequest generates a request for the CreateHostedZone operation. +// CreateHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the CreateHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateHostedZoneRequest method. +// req, resp := client.CreateHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateHostedZoneRequest(input *CreateHostedZoneInput) (req *request.Request, output *CreateHostedZoneOutput) { op := &request.Operation{ Name: opCreateHostedZone, @@ -204,7 +309,28 @@ func (c *Route53) CreateHostedZone(input *CreateHostedZoneInput) (*CreateHostedZ const opCreateReusableDelegationSet = "CreateReusableDelegationSet" -// CreateReusableDelegationSetRequest generates a request for the CreateReusableDelegationSet operation. +// CreateReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReusableDelegationSetRequest method. +// req, resp := client.CreateReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateReusableDelegationSetRequest(input *CreateReusableDelegationSetInput) (req *request.Request, output *CreateReusableDelegationSetOutput) { op := &request.Operation{ Name: opCreateReusableDelegationSet, @@ -240,7 +366,28 @@ func (c *Route53) CreateReusableDelegationSet(input *CreateReusableDelegationSet const opCreateTrafficPolicy = "CreateTrafficPolicy" -// CreateTrafficPolicyRequest generates a request for the CreateTrafficPolicy operation. 
+// CreateTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyRequest method. +// req, resp := client.CreateTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateTrafficPolicyRequest(input *CreateTrafficPolicyInput) (req *request.Request, output *CreateTrafficPolicyOutput) { op := &request.Operation{ Name: opCreateTrafficPolicy, @@ -274,7 +421,28 @@ func (c *Route53) CreateTrafficPolicy(input *CreateTrafficPolicyInput) (*CreateT const opCreateTrafficPolicyInstance = "CreateTrafficPolicyInstance" -// CreateTrafficPolicyInstanceRequest generates a request for the CreateTrafficPolicyInstance operation. +// CreateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyInstanceRequest method. +// req, resp := client.CreateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateTrafficPolicyInstanceRequest(input *CreateTrafficPolicyInstanceInput) (req *request.Request, output *CreateTrafficPolicyInstanceOutput) { op := &request.Operation{ Name: opCreateTrafficPolicyInstance, @@ -312,7 +480,28 @@ func (c *Route53) CreateTrafficPolicyInstance(input *CreateTrafficPolicyInstance const opCreateTrafficPolicyVersion = "CreateTrafficPolicyVersion" -// CreateTrafficPolicyVersionRequest generates a request for the CreateTrafficPolicyVersion operation. +// CreateTrafficPolicyVersionRequest generates a "aws/request.Request" representing the +// client's request for the CreateTrafficPolicyVersion operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the CreateTrafficPolicyVersion method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTrafficPolicyVersionRequest method. +// req, resp := client.CreateTrafficPolicyVersionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) CreateTrafficPolicyVersionRequest(input *CreateTrafficPolicyVersionInput) (req *request.Request, output *CreateTrafficPolicyVersionOutput) { op := &request.Operation{ Name: opCreateTrafficPolicyVersion, @@ -349,7 +538,28 @@ func (c *Route53) CreateTrafficPolicyVersion(input *CreateTrafficPolicyVersionIn const opDeleteHealthCheck = "DeleteHealthCheck" -// DeleteHealthCheckRequest generates a request for the DeleteHealthCheck operation. +// DeleteHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHealthCheckRequest method. +// req, resp := client.DeleteHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DeleteHealthCheckRequest(input *DeleteHealthCheckInput) (req *request.Request, output *DeleteHealthCheckOutput) { op := &request.Operation{ Name: opDeleteHealthCheck, @@ -385,7 +595,28 @@ func (c *Route53) DeleteHealthCheck(input *DeleteHealthCheckInput) (*DeleteHealt const opDeleteHostedZone = "DeleteHostedZone" -// DeleteHostedZoneRequest generates a request for the DeleteHostedZone operation. +// DeleteHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the DeleteHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteHostedZoneRequest method. 
+// req, resp := client.DeleteHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *request.Request, output *DeleteHostedZoneOutput) { op := &request.Operation{ Name: opDeleteHostedZone, @@ -406,14 +637,10 @@ func (c *Route53) DeleteHostedZoneRequest(input *DeleteHostedZoneInput) (req *re // This action deletes a hosted zone. To delete a hosted zone, send a DELETE // request to the /Route 53 API version/hostedzone/hosted zone ID resource. // -// For more information about deleting a hosted zone, see Deleting a Hosted -// Zone (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DeleteHostedZone.html) -// in the Amazon Route 53 Developer Guide. -// -// You can delete a hosted zone only if there are no resource record sets -// other than the default SOA record and NS resource record sets. If your hosted -// zone contains other resource record sets, you must delete them before you -// can delete your hosted zone. If you try to delete a hosted zone that contains +// You can delete a hosted zone only if there are no resource record sets other +// than the default SOA record and NS resource record sets. If your hosted zone +// contains other resource record sets, you must delete them before you can +// delete your hosted zone. If you try to delete a hosted zone that contains // other resource record sets, Amazon Route 53 will deny your request with a // HostedZoneNotEmpty error. For information about deleting records from your // hosted zone, see ChangeResourceRecordSets. @@ -425,7 +652,28 @@ func (c *Route53) DeleteHostedZone(input *DeleteHostedZoneInput) (*DeleteHostedZ const opDeleteReusableDelegationSet = "DeleteReusableDelegationSet" -// DeleteReusableDelegationSetRequest generates a request for the DeleteReusableDelegationSet operation. +// DeleteReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReusableDelegationSetRequest method. +// req, resp := client.DeleteReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DeleteReusableDelegationSetRequest(input *DeleteReusableDelegationSetInput) (req *request.Request, output *DeleteReusableDelegationSetOutput) { op := &request.Operation{ Name: opDeleteReusableDelegationSet, @@ -461,7 +709,28 @@ func (c *Route53) DeleteReusableDelegationSet(input *DeleteReusableDelegationSet const opDeleteTrafficPolicy = "DeleteTrafficPolicy" -// DeleteTrafficPolicyRequest generates a request for the DeleteTrafficPolicy operation. +// DeleteTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicy operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyRequest method. +// req, resp := client.DeleteTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DeleteTrafficPolicyRequest(input *DeleteTrafficPolicyInput) (req *request.Request, output *DeleteTrafficPolicyOutput) { op := &request.Operation{ Name: opDeleteTrafficPolicy, @@ -489,7 +758,28 @@ func (c *Route53) DeleteTrafficPolicy(input *DeleteTrafficPolicyInput) (*DeleteT const opDeleteTrafficPolicyInstance = "DeleteTrafficPolicyInstance" -// DeleteTrafficPolicyInstanceRequest generates a request for the DeleteTrafficPolicyInstance operation. +// DeleteTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTrafficPolicyInstanceRequest method. +// req, resp := client.DeleteTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DeleteTrafficPolicyInstanceRequest(input *DeleteTrafficPolicyInstanceInput) (req *request.Request, output *DeleteTrafficPolicyInstanceOutput) { op := &request.Operation{ Name: opDeleteTrafficPolicyInstance, @@ -524,7 +814,28 @@ func (c *Route53) DeleteTrafficPolicyInstance(input *DeleteTrafficPolicyInstance const opDisassociateVPCFromHostedZone = "DisassociateVPCFromHostedZone" -// DisassociateVPCFromHostedZoneRequest generates a request for the DisassociateVPCFromHostedZone operation. +// DisassociateVPCFromHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the DisassociateVPCFromHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DisassociateVPCFromHostedZone method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DisassociateVPCFromHostedZoneRequest method. +// req, resp := client.DisassociateVPCFromHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) DisassociateVPCFromHostedZoneRequest(input *DisassociateVPCFromHostedZoneInput) (req *request.Request, output *DisassociateVPCFromHostedZoneOutput) { op := &request.Operation{ Name: opDisassociateVPCFromHostedZone, @@ -558,7 +869,28 @@ func (c *Route53) DisassociateVPCFromHostedZone(input *DisassociateVPCFromHosted const opGetChange = "GetChange" -// GetChangeRequest generates a request for the GetChange operation. +// GetChangeRequest generates a "aws/request.Request" representing the +// client's request for the GetChange operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChange method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeRequest method. +// req, resp := client.GetChangeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetChangeRequest(input *GetChangeInput) (req *request.Request, output *GetChangeOutput) { op := &request.Operation{ Name: opGetChange, @@ -593,7 +925,28 @@ func (c *Route53) GetChange(input *GetChangeInput) (*GetChangeOutput, error) { const opGetChangeDetails = "GetChangeDetails" -// GetChangeDetailsRequest generates a request for the GetChangeDetails operation. +// GetChangeDetailsRequest generates a "aws/request.Request" representing the +// client's request for the GetChangeDetails operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetChangeDetails method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetChangeDetailsRequest method. 
+// req, resp := client.GetChangeDetailsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetChangeDetailsRequest(input *GetChangeDetailsInput) (req *request.Request, output *GetChangeDetailsOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetChangeDetails, has been deprecated") @@ -623,7 +976,28 @@ func (c *Route53) GetChangeDetails(input *GetChangeDetailsInput) (*GetChangeDeta const opGetCheckerIpRanges = "GetCheckerIpRanges" -// GetCheckerIpRangesRequest generates a request for the GetCheckerIpRanges operation. +// GetCheckerIpRangesRequest generates a "aws/request.Request" representing the +// client's request for the GetCheckerIpRanges operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCheckerIpRanges method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCheckerIpRangesRequest method. +// req, resp := client.GetCheckerIpRangesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetCheckerIpRangesRequest(input *GetCheckerIpRangesInput) (req *request.Request, output *GetCheckerIpRangesOutput) { op := &request.Operation{ Name: opGetCheckerIpRanges, @@ -654,7 +1028,28 @@ func (c *Route53) GetCheckerIpRanges(input *GetCheckerIpRangesInput) (*GetChecke const opGetGeoLocation = "GetGeoLocation" -// GetGeoLocationRequest generates a request for the GetGeoLocation operation. +// GetGeoLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetGeoLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetGeoLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetGeoLocationRequest method. +// req, resp := client.GetGeoLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetGeoLocationRequest(input *GetGeoLocationInput) (req *request.Request, output *GetGeoLocationOutput) { op := &request.Operation{ Name: opGetGeoLocation, @@ -683,7 +1078,28 @@ func (c *Route53) GetGeoLocation(input *GetGeoLocationInput) (*GetGeoLocationOut const opGetHealthCheck = "GetHealthCheck" -// GetHealthCheckRequest generates a request for the GetHealthCheck operation. +// GetHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheck operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckRequest method. +// req, resp := client.GetHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHealthCheckRequest(input *GetHealthCheckInput) (req *request.Request, output *GetHealthCheckOutput) { op := &request.Operation{ Name: opGetHealthCheck, @@ -711,7 +1127,28 @@ func (c *Route53) GetHealthCheck(input *GetHealthCheckInput) (*GetHealthCheckOut const opGetHealthCheckCount = "GetHealthCheckCount" -// GetHealthCheckCountRequest generates a request for the GetHealthCheckCount operation. +// GetHealthCheckCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckCountRequest method. +// req, resp := client.GetHealthCheckCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHealthCheckCountRequest(input *GetHealthCheckCountInput) (req *request.Request, output *GetHealthCheckCountOutput) { op := &request.Operation{ Name: opGetHealthCheckCount, @@ -739,7 +1176,28 @@ func (c *Route53) GetHealthCheckCount(input *GetHealthCheckCountInput) (*GetHeal const opGetHealthCheckLastFailureReason = "GetHealthCheckLastFailureReason" -// GetHealthCheckLastFailureReasonRequest generates a request for the GetHealthCheckLastFailureReason operation. +// GetHealthCheckLastFailureReasonRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckLastFailureReason operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckLastFailureReason method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetHealthCheckLastFailureReasonRequest method. +// req, resp := client.GetHealthCheckLastFailureReasonRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHealthCheckLastFailureReasonRequest(input *GetHealthCheckLastFailureReasonInput) (req *request.Request, output *GetHealthCheckLastFailureReasonOutput) { op := &request.Operation{ Name: opGetHealthCheckLastFailureReason, @@ -769,7 +1227,28 @@ func (c *Route53) GetHealthCheckLastFailureReason(input *GetHealthCheckLastFailu const opGetHealthCheckStatus = "GetHealthCheckStatus" -// GetHealthCheckStatusRequest generates a request for the GetHealthCheckStatus operation. +// GetHealthCheckStatusRequest generates a "aws/request.Request" representing the +// client's request for the GetHealthCheckStatus operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHealthCheckStatus method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHealthCheckStatusRequest method. +// req, resp := client.GetHealthCheckStatusRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHealthCheckStatusRequest(input *GetHealthCheckStatusInput) (req *request.Request, output *GetHealthCheckStatusOutput) { op := &request.Operation{ Name: opGetHealthCheckStatus, @@ -798,7 +1277,28 @@ func (c *Route53) GetHealthCheckStatus(input *GetHealthCheckStatusInput) (*GetHe const opGetHostedZone = "GetHostedZone" -// GetHostedZoneRequest generates a request for the GetHostedZone operation. +// GetHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneRequest method. +// req, resp := client.GetHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHostedZoneRequest(input *GetHostedZoneInput) (req *request.Request, output *GetHostedZoneOutput) { op := &request.Operation{ Name: opGetHostedZone, @@ -828,7 +1328,28 @@ func (c *Route53) GetHostedZone(input *GetHostedZoneInput) (*GetHostedZoneOutput const opGetHostedZoneCount = "GetHostedZoneCount" -// GetHostedZoneCountRequest generates a request for the GetHostedZoneCount operation. 
+// GetHostedZoneCountRequest generates a "aws/request.Request" representing the +// client's request for the GetHostedZoneCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetHostedZoneCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetHostedZoneCountRequest method. +// req, resp := client.GetHostedZoneCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetHostedZoneCountRequest(input *GetHostedZoneCountInput) (req *request.Request, output *GetHostedZoneCountOutput) { op := &request.Operation{ Name: opGetHostedZoneCount, @@ -856,7 +1377,28 @@ func (c *Route53) GetHostedZoneCount(input *GetHostedZoneCountInput) (*GetHosted const opGetReusableDelegationSet = "GetReusableDelegationSet" -// GetReusableDelegationSetRequest generates a request for the GetReusableDelegationSet operation. +// GetReusableDelegationSetRequest generates a "aws/request.Request" representing the +// client's request for the GetReusableDelegationSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetReusableDelegationSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetReusableDelegationSetRequest method. +// req, resp := client.GetReusableDelegationSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetReusableDelegationSetRequest(input *GetReusableDelegationSetInput) (req *request.Request, output *GetReusableDelegationSetOutput) { op := &request.Operation{ Name: opGetReusableDelegationSet, @@ -884,7 +1426,28 @@ func (c *Route53) GetReusableDelegationSet(input *GetReusableDelegationSetInput) const opGetTrafficPolicy = "GetTrafficPolicy" -// GetTrafficPolicyRequest generates a request for the GetTrafficPolicy operation. +// GetTrafficPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyRequest method. +// req, resp := client.GetTrafficPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetTrafficPolicyRequest(input *GetTrafficPolicyInput) (req *request.Request, output *GetTrafficPolicyOutput) { op := &request.Operation{ Name: opGetTrafficPolicy, @@ -912,7 +1475,28 @@ func (c *Route53) GetTrafficPolicy(input *GetTrafficPolicyInput) (*GetTrafficPol const opGetTrafficPolicyInstance = "GetTrafficPolicyInstance" -// GetTrafficPolicyInstanceRequest generates a request for the GetTrafficPolicyInstance operation. +// GetTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceRequest method. +// req, resp := client.GetTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetTrafficPolicyInstanceRequest(input *GetTrafficPolicyInstanceInput) (req *request.Request, output *GetTrafficPolicyInstanceOutput) { op := &request.Operation{ Name: opGetTrafficPolicyInstance, @@ -947,7 +1531,28 @@ func (c *Route53) GetTrafficPolicyInstance(input *GetTrafficPolicyInstanceInput) const opGetTrafficPolicyInstanceCount = "GetTrafficPolicyInstanceCount" -// GetTrafficPolicyInstanceCountRequest generates a request for the GetTrafficPolicyInstanceCount operation. +// GetTrafficPolicyInstanceCountRequest generates a "aws/request.Request" representing the +// client's request for the GetTrafficPolicyInstanceCount operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTrafficPolicyInstanceCount method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTrafficPolicyInstanceCountRequest method. 
+// req, resp := client.GetTrafficPolicyInstanceCountRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) GetTrafficPolicyInstanceCountRequest(input *GetTrafficPolicyInstanceCountInput) (req *request.Request, output *GetTrafficPolicyInstanceCountOutput) { op := &request.Operation{ Name: opGetTrafficPolicyInstanceCount, @@ -978,7 +1583,28 @@ func (c *Route53) GetTrafficPolicyInstanceCount(input *GetTrafficPolicyInstanceC const opListChangeBatchesByHostedZone = "ListChangeBatchesByHostedZone" -// ListChangeBatchesByHostedZoneRequest generates a request for the ListChangeBatchesByHostedZone operation. +// ListChangeBatchesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByHostedZoneRequest method. +// req, resp := client.ListChangeBatchesByHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListChangeBatchesByHostedZoneRequest(input *ListChangeBatchesByHostedZoneInput) (req *request.Request, output *ListChangeBatchesByHostedZoneOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, ListChangeBatchesByHostedZone, has been deprecated") @@ -1009,7 +1635,28 @@ func (c *Route53) ListChangeBatchesByHostedZone(input *ListChangeBatchesByHosted const opListChangeBatchesByRRSet = "ListChangeBatchesByRRSet" -// ListChangeBatchesByRRSetRequest generates a request for the ListChangeBatchesByRRSet operation. +// ListChangeBatchesByRRSetRequest generates a "aws/request.Request" representing the +// client's request for the ListChangeBatchesByRRSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListChangeBatchesByRRSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListChangeBatchesByRRSetRequest method. 
+// req, resp := client.ListChangeBatchesByRRSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListChangeBatchesByRRSetRequest(input *ListChangeBatchesByRRSetInput) (req *request.Request, output *ListChangeBatchesByRRSetOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, ListChangeBatchesByRRSet, has been deprecated") @@ -1040,7 +1687,28 @@ func (c *Route53) ListChangeBatchesByRRSet(input *ListChangeBatchesByRRSetInput) const opListGeoLocations = "ListGeoLocations" -// ListGeoLocationsRequest generates a request for the ListGeoLocations operation. +// ListGeoLocationsRequest generates a "aws/request.Request" representing the +// client's request for the ListGeoLocations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListGeoLocations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListGeoLocationsRequest method. +// req, resp := client.ListGeoLocationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListGeoLocationsRequest(input *ListGeoLocationsInput) (req *request.Request, output *ListGeoLocationsOutput) { op := &request.Operation{ Name: opListGeoLocations, @@ -1079,7 +1747,28 @@ func (c *Route53) ListGeoLocations(input *ListGeoLocationsInput) (*ListGeoLocati const opListHealthChecks = "ListHealthChecks" -// ListHealthChecksRequest generates a request for the ListHealthChecks operation. +// ListHealthChecksRequest generates a "aws/request.Request" representing the +// client's request for the ListHealthChecks operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHealthChecks method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHealthChecksRequest method. +// req, resp := client.ListHealthChecksRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListHealthChecksRequest(input *ListHealthChecksInput) (req *request.Request, output *ListHealthChecksOutput) { op := &request.Operation{ Name: opListHealthChecks, @@ -1119,6 +1808,23 @@ func (c *Route53) ListHealthChecks(input *ListHealthChecksInput) (*ListHealthChe return out, err } +// ListHealthChecksPages iterates over the pages of a ListHealthChecks operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. 
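// A minimal sketch (not generated SDK code) of driving this paginator: the
// callback receives each page of results and returns true to keep iterating.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/route53"
)

// printAllHealthChecks pages through every health check and prints its ID,
// assuming svc is an initialized Route 53 client.
func printAllHealthChecks(svc *route53.Route53) error {
	return svc.ListHealthChecksPages(&route53.ListHealthChecksInput{},
		func(page *route53.ListHealthChecksOutput, lastPage bool) bool {
			for _, hc := range page.HealthChecks {
				fmt.Println(aws.StringValue(hc.Id))
			}
			return true // return false here to stop before the last page
		})
}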
+// +// See ListHealthChecks method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHealthChecks operation. +// pageNum := 0 +// err := client.ListHealthChecksPages(params, +// func(page *ListHealthChecksOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p *ListHealthChecksOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListHealthChecksRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1129,7 +1835,28 @@ func (c *Route53) ListHealthChecksPages(input *ListHealthChecksInput, fn func(p const opListHostedZones = "ListHostedZones" -// ListHostedZonesRequest generates a request for the ListHostedZones operation. +// ListHostedZonesRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZones operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZones method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesRequest method. +// req, resp := client.ListHostedZonesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListHostedZonesRequest(input *ListHostedZonesInput) (req *request.Request, output *ListHostedZonesOutput) { op := &request.Operation{ Name: opListHostedZones, @@ -1169,6 +1896,23 @@ func (c *Route53) ListHostedZones(input *ListHostedZonesInput) (*ListHostedZones return out, err } +// ListHostedZonesPages iterates over the pages of a ListHostedZones operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListHostedZones method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListHostedZones operation. +// pageNum := 0 +// err := client.ListHostedZonesPages(params, +// func(page *ListHostedZonesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *ListHostedZonesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListHostedZonesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1179,7 +1923,28 @@ func (c *Route53) ListHostedZonesPages(input *ListHostedZonesInput, fn func(p *L const opListHostedZonesByName = "ListHostedZonesByName" -// ListHostedZonesByNameRequest generates a request for the ListHostedZonesByName operation. +// ListHostedZonesByNameRequest generates a "aws/request.Request" representing the +// client's request for the ListHostedZonesByName operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListHostedZonesByName method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListHostedZonesByNameRequest method. +// req, resp := client.ListHostedZonesByNameRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListHostedZonesByNameRequest(input *ListHostedZonesByNameInput) (req *request.Request, output *ListHostedZonesByNameOutput) { op := &request.Operation{ Name: opListHostedZonesByName, @@ -1216,7 +1981,28 @@ func (c *Route53) ListHostedZonesByName(input *ListHostedZonesByNameInput) (*Lis const opListResourceRecordSets = "ListResourceRecordSets" -// ListResourceRecordSetsRequest generates a request for the ListResourceRecordSets operation. +// ListResourceRecordSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListResourceRecordSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListResourceRecordSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListResourceRecordSetsRequest method. +// req, resp := client.ListResourceRecordSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInput) (req *request.Request, output *ListResourceRecordSetsOutput) { op := &request.Operation{ Name: opListResourceRecordSets, @@ -1240,50 +2026,62 @@ func (c *Route53) ListResourceRecordSetsRequest(input *ListResourceRecordSetsInp return } -// Imagine all the resource record sets in a zone listed out in front of you. -// Imagine them sorted lexicographically first by DNS name (with the labels -// reversed, like "com.amazon.www" for example), and secondarily, lexicographically -// by record type. This operation retrieves at most MaxItems resource record -// sets from this list, in order, starting at a position specified by the Name -// and Type arguments: +// List the resource record sets in a specified hosted zone. Send a GET request +// to the 2013-04-01/hostedzone/hosted zone ID/rrset resource. // -// If both Name and Type are omitted, this means start the results at the -// first RRSET in the HostedZone. If Name is specified but Type is omitted, -// this means start the results at the first RRSET in the list whose name is -// greater than or equal to Name. 
If both Name and Type are specified, this -// means start the results at the first RRSET in the list whose name is greater -// than or equal to Name and whose type is greater than or equal to Type. It -// is an error to specify the Type but not the Name. Use ListResourceRecordSets -// to retrieve a single known record set by specifying the record set's name -// and type, and setting MaxItems = 1 +// ListResourceRecordSets returns up to 100 resource record sets at a time +// in ASCII order, beginning at a position specified by the name and type elements. +// The action sorts results first by DNS name with the labels reversed, for +// example: // -// To retrieve all the records in a HostedZone, first pause any processes making -// calls to ChangeResourceRecordSets. Initially call ListResourceRecordSets -// without a Name and Type to get the first page of record sets. For subsequent -// calls, set Name and Type to the NextName and NextType values returned by -// the previous response. +// com.example.www. // -// In the presence of concurrent ChangeResourceRecordSets calls, there is no -// consistency of results across calls to ListResourceRecordSets. The only way -// to get a consistent multi-page snapshot of all RRSETs in a zone is to stop -// making changes while pagination is in progress. +// Note the trailing dot, which can change the sort order in some circumstances. +// When multiple records have the same DNS name, the action sorts results by +// the record type. // -// However, the results from ListResourceRecordSets are consistent within a -// page. If MakeChange calls are taking place concurrently, the result of each -// one will either be completely visible in your results or not at all. You -// will not see partial changes, or changes that do not ultimately succeed. -// (This follows from the fact that MakeChange is atomic) +// You can use the name and type elements to adjust the beginning position +// of the list of resource record sets returned: // -// The results from ListResourceRecordSets are strongly consistent with ChangeResourceRecordSets. -// To be precise, if a single process makes a call to ChangeResourceRecordSets -// and receives a successful response, the effects of that change will be visible -// in a subsequent call to ListResourceRecordSets by that process. +// If you do not specify Name or Type: The results begin with the first resource +// record set that the hosted zone contains. If you specify Name but not Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name. If you specify Type but not Name: Amazon +// Route 53 returns the InvalidInput error. If you specify both Name and Type: +// The results begin with the first resource record set in the list whose name +// is greater than or equal to Name, and whose type is greater than or equal +// to Type. This action returns the most current version of the records. This +// includes records that are PENDING, and that are not yet available on all +// Amazon Route 53 DNS servers. +// +// To ensure that you get an accurate listing of the resource record sets for +// a hosted zone at a point in time, do not submit a ChangeResourceRecordSets +// request while you are paging through the results of a ListResourceRecordSets +// request. If you do, some pages may display results without the latest changes +// while other pages display results with the latest changes. 
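As a rough illustration of the lookup behavior described in the comment above, a minimal sketch (assuming valid credentials, and using a hypothetical hosted zone ID and record name) that starts the listing at a known name and type and sets MaxItems to "1" to retrieve a single record set:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.New())

	// Hypothetical hosted zone ID and record name; replace with real values.
	resp, err := svc.ListResourceRecordSets(&route53.ListResourceRecordSetsInput{
		HostedZoneId:    aws.String("Z1D633PJN98FT9"),
		StartRecordName: aws.String("www.example.com."),
		StartRecordType: aws.String("A"),
		MaxItems:        aws.String("1"), // fetch a single known record set
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, rrs := range resp.ResourceRecordSets {
		fmt.Println(aws.StringValue(rrs.Name), aws.StringValue(rrs.Type))
	}
}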
func (c *Route53) ListResourceRecordSets(input *ListResourceRecordSetsInput) (*ListResourceRecordSetsOutput, error) { req, out := c.ListResourceRecordSetsRequest(input) err := req.Send() return out, err } +// ListResourceRecordSetsPages iterates over the pages of a ListResourceRecordSets operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListResourceRecordSets method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListResourceRecordSets operation. +// pageNum := 0 +// err := client.ListResourceRecordSetsPages(params, +// func(page *ListResourceRecordSetsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput, fn func(p *ListResourceRecordSetsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListResourceRecordSetsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1294,7 +2092,28 @@ func (c *Route53) ListResourceRecordSetsPages(input *ListResourceRecordSetsInput const opListReusableDelegationSets = "ListReusableDelegationSets" -// ListReusableDelegationSetsRequest generates a request for the ListReusableDelegationSets operation. +// ListReusableDelegationSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReusableDelegationSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReusableDelegationSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReusableDelegationSetsRequest method. +// req, resp := client.ListReusableDelegationSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListReusableDelegationSetsRequest(input *ListReusableDelegationSetsInput) (req *request.Request, output *ListReusableDelegationSetsOutput) { op := &request.Operation{ Name: opListReusableDelegationSets, @@ -1330,7 +2149,28 @@ func (c *Route53) ListReusableDelegationSets(input *ListReusableDelegationSetsIn const opListTagsForResource = "ListTagsForResource" -// ListTagsForResourceRequest generates a request for the ListTagsForResource operation. +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { op := &request.Operation{ Name: opListTagsForResource, @@ -1356,7 +2196,28 @@ func (c *Route53) ListTagsForResource(input *ListTagsForResourceInput) (*ListTag const opListTagsForResources = "ListTagsForResources" -// ListTagsForResourcesRequest generates a request for the ListTagsForResources operation. +// ListTagsForResourcesRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResources operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResources method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourcesRequest method. +// req, resp := client.ListTagsForResourcesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTagsForResourcesRequest(input *ListTagsForResourcesInput) (req *request.Request, output *ListTagsForResourcesOutput) { op := &request.Operation{ Name: opListTagsForResources, @@ -1382,7 +2243,28 @@ func (c *Route53) ListTagsForResources(input *ListTagsForResourcesInput) (*ListT const opListTrafficPolicies = "ListTrafficPolicies" -// ListTrafficPoliciesRequest generates a request for the ListTrafficPolicies operation. +// ListTrafficPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPoliciesRequest method. 
+// req, resp := client.ListTrafficPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTrafficPoliciesRequest(input *ListTrafficPoliciesInput) (req *request.Request, output *ListTrafficPoliciesOutput) { op := &request.Operation{ Name: opListTrafficPolicies, @@ -1436,7 +2318,28 @@ func (c *Route53) ListTrafficPolicies(input *ListTrafficPoliciesInput) (*ListTra const opListTrafficPolicyInstances = "ListTrafficPolicyInstances" -// ListTrafficPolicyInstancesRequest generates a request for the ListTrafficPolicyInstances operation. +// ListTrafficPolicyInstancesRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstances operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstances method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesRequest method. +// req, resp := client.ListTrafficPolicyInstancesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTrafficPolicyInstancesRequest(input *ListTrafficPolicyInstancesInput) (req *request.Request, output *ListTrafficPolicyInstancesOutput) { op := &request.Operation{ Name: opListTrafficPolicyInstances, @@ -1495,7 +2398,28 @@ func (c *Route53) ListTrafficPolicyInstances(input *ListTrafficPolicyInstancesIn const opListTrafficPolicyInstancesByHostedZone = "ListTrafficPolicyInstancesByHostedZone" -// ListTrafficPolicyInstancesByHostedZoneRequest generates a request for the ListTrafficPolicyInstancesByHostedZone operation. +// ListTrafficPolicyInstancesByHostedZoneRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstancesByHostedZone operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstancesByHostedZone method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesByHostedZoneRequest method. 
+// req, resp := client.ListTrafficPolicyInstancesByHostedZoneRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTrafficPolicyInstancesByHostedZoneRequest(input *ListTrafficPolicyInstancesByHostedZoneInput) (req *request.Request, output *ListTrafficPolicyInstancesByHostedZoneOutput) { op := &request.Operation{ Name: opListTrafficPolicyInstancesByHostedZone, @@ -1554,7 +2478,28 @@ func (c *Route53) ListTrafficPolicyInstancesByHostedZone(input *ListTrafficPolic const opListTrafficPolicyInstancesByPolicy = "ListTrafficPolicyInstancesByPolicy" -// ListTrafficPolicyInstancesByPolicyRequest generates a request for the ListTrafficPolicyInstancesByPolicy operation. +// ListTrafficPolicyInstancesByPolicyRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyInstancesByPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyInstancesByPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyInstancesByPolicyRequest method. +// req, resp := client.ListTrafficPolicyInstancesByPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTrafficPolicyInstancesByPolicyRequest(input *ListTrafficPolicyInstancesByPolicyInput) (req *request.Request, output *ListTrafficPolicyInstancesByPolicyOutput) { op := &request.Operation{ Name: opListTrafficPolicyInstancesByPolicy, @@ -1614,7 +2559,28 @@ func (c *Route53) ListTrafficPolicyInstancesByPolicy(input *ListTrafficPolicyIns const opListTrafficPolicyVersions = "ListTrafficPolicyVersions" -// ListTrafficPolicyVersionsRequest generates a request for the ListTrafficPolicyVersions operation. +// ListTrafficPolicyVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListTrafficPolicyVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTrafficPolicyVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTrafficPolicyVersionsRequest method. 
+// req, resp := client.ListTrafficPolicyVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) ListTrafficPolicyVersionsRequest(input *ListTrafficPolicyVersionsInput) (req *request.Request, output *ListTrafficPolicyVersionsOutput) { op := &request.Operation{ Name: opListTrafficPolicyVersions, @@ -1667,7 +2633,28 @@ func (c *Route53) ListTrafficPolicyVersions(input *ListTrafficPolicyVersionsInpu const opUpdateHealthCheck = "UpdateHealthCheck" -// UpdateHealthCheckRequest generates a request for the UpdateHealthCheck operation. +// UpdateHealthCheckRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHealthCheck operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHealthCheck method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHealthCheckRequest method. +// req, resp := client.UpdateHealthCheckRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) UpdateHealthCheckRequest(input *UpdateHealthCheckInput) (req *request.Request, output *UpdateHealthCheckOutput) { op := &request.Operation{ Name: opUpdateHealthCheck, @@ -1699,7 +2686,28 @@ func (c *Route53) UpdateHealthCheck(input *UpdateHealthCheckInput) (*UpdateHealt const opUpdateHostedZoneComment = "UpdateHostedZoneComment" -// UpdateHostedZoneCommentRequest generates a request for the UpdateHostedZoneComment operation. +// UpdateHostedZoneCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateHostedZoneComment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateHostedZoneComment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateHostedZoneCommentRequest method. +// req, resp := client.UpdateHostedZoneCommentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) UpdateHostedZoneCommentRequest(input *UpdateHostedZoneCommentInput) (req *request.Request, output *UpdateHostedZoneCommentOutput) { op := &request.Operation{ Name: opUpdateHostedZoneComment, @@ -1731,7 +2739,28 @@ func (c *Route53) UpdateHostedZoneComment(input *UpdateHostedZoneCommentInput) ( const opUpdateTrafficPolicyComment = "UpdateTrafficPolicyComment" -// UpdateTrafficPolicyCommentRequest generates a request for the UpdateTrafficPolicyComment operation. 
+// UpdateTrafficPolicyCommentRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrafficPolicyComment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrafficPolicyComment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrafficPolicyCommentRequest method. +// req, resp := client.UpdateTrafficPolicyCommentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) UpdateTrafficPolicyCommentRequest(input *UpdateTrafficPolicyCommentInput) (req *request.Request, output *UpdateTrafficPolicyCommentOutput) { op := &request.Operation{ Name: opUpdateTrafficPolicyComment, @@ -1764,7 +2793,28 @@ func (c *Route53) UpdateTrafficPolicyComment(input *UpdateTrafficPolicyCommentIn const opUpdateTrafficPolicyInstance = "UpdateTrafficPolicyInstance" -// UpdateTrafficPolicyInstanceRequest generates a request for the UpdateTrafficPolicyInstance operation. +// UpdateTrafficPolicyInstanceRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTrafficPolicyInstance operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateTrafficPolicyInstance method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateTrafficPolicyInstanceRequest method. +// req, resp := client.UpdateTrafficPolicyInstanceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *Route53) UpdateTrafficPolicyInstanceRequest(input *UpdateTrafficPolicyInstanceInput) (req *request.Request, output *UpdateTrafficPolicyInstanceOutput) { op := &request.Operation{ Name: opUpdateTrafficPolicyInstance, @@ -1865,10 +2915,7 @@ func (s *AlarmIdentifier) Validate() error { // record sets in the same private hosted zone. Creating alias resource record // sets for CloudFront distributions, ELB load balancers, and Amazon S3 buckets // is not supported. You can't create alias resource record sets for failover, -// geolocation, or latency resource record sets in a private hosted zone. For -// more information and an example, see Example: Creating Alias Resource Record -// Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) -// in the Amazon Route 53 API Reference. +// geolocation, or latency resource record sets in a private hosted zone. 
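For illustration only, a minimal sketch of an alias resource record set of the kind described in the AliasTarget comments below, UPSERTing a hypothetical record that points at a hypothetical ELB DNS name; the zone IDs and names are placeholders, and the alias record deliberately omits TTL and ResourceRecords:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.New())

	resp, err := svc.ChangeResourceRecordSets(&route53.ChangeResourceRecordSetsInput{
		HostedZoneId: aws.String("Z1D633PJN98FT9"), // hypothetical hosted zone
		ChangeBatch: &route53.ChangeBatch{
			Changes: []*route53.Change{{
				Action: aws.String(route53.ChangeActionUpsert),
				ResourceRecordSet: &route53.ResourceRecordSet{
					Name: aws.String("www.example.com."),
					Type: aws.String(route53.RRTypeA),
					AliasTarget: &route53.AliasTarget{
						// Hosted zone ID and DNS name of the hypothetical ELB target.
						HostedZoneId:         aws.String("Z35SXDOTRQ7X7K"),
						DNSName:              aws.String("my-elb-1234567890.us-east-1.elb.amazonaws.com."),
						EvaluateTargetHealth: aws.Bool(true),
					},
				},
			}},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.ChangeInfo)
}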
type AliasTarget struct { _ struct{} `type:"structure"` @@ -1900,9 +2947,7 @@ type AliasTarget struct { // S3 (http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html) in // the Amazon Simple Storage Service Developer Guide. Another Amazon Route 53 // resource record set: Specify the value of the Name element for a resource - // record set in the current hosted zone. For more information and an example, - // see Example: Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) - // in the Amazon Route 53 API Reference. + // record set in the current hosted zone. DNSName *string `type:"string" required:"true"` // Alias resource record sets only: If you set the value of EvaluateTargetHealth @@ -1935,19 +2980,17 @@ type AliasTarget struct { // record set or a group of resource record sets (for example, a group of weighted // resource record sets), but it is not another alias resource record set, we // recommend that you associate a health check with all of the resource record - // sets in the alias target. For more information, see What Happens When You - // Omit Health Checks? (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html#dns-failover-complex-configs-hc-omitting) - // in the Amazon Route 53 Developer Guide. If you specify an ELB load balancer - // in AliasTarget, Elastic Load Balancing routes queries only to the healthy - // Amazon EC2 instances that are registered with the load balancer. If no Amazon - // EC2 instances are healthy or if the load balancer itself is unhealthy, and - // if EvaluateTargetHealth is true for the corresponding alias resource record - // set, Amazon Route 53 routes queries to other resources. When you create a - // load balancer, you configure settings for Elastic Load Balancing health checks; - // they're not Amazon Route 53 health checks, but they perform a similar function. - // Do not create Amazon Route 53 health checks for the Amazon EC2 instances - // that you register with an ELB load balancer. For more information, see How - // Health Checks Work in More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html) + // sets in the alias target. If you specify an ELB load balancer in AliasTarget, + // Elastic Load Balancing routes queries only to the healthy Amazon EC2 instances + // that are registered with the load balancer. If no Amazon EC2 instances are + // healthy or if the load balancer itself is unhealthy, and if EvaluateTargetHealth + // is true for the corresponding alias resource record set, Amazon Route 53 + // routes queries to other resources. When you create a load balancer, you configure + // settings for Elastic Load Balancing health checks; they're not Amazon Route + // 53 health checks, but they perform a similar function. Do not create Amazon + // Route 53 health checks for the Amazon EC2 instances that you register with + // an ELB load balancer. For more information, see How Health Checks Work in + // More Complex Amazon Route 53 Configurations (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-complex-configs.html) // in the Amazon Route 53 Developer Guide. We recommend that you set EvaluateTargetHealth // to true only when you have enough idle capacity to handle the failure of // one or more endpoints. @@ -1973,9 +3016,7 @@ type AliasTarget struct { // in the Amazon Web Services General Reference. 
Another Amazon Route 53 resource // record set in your hosted zone: Specify the hosted zone ID of your hosted // zone. (An alias resource record set cannot reference a resource record set - // in a different hosted zone.) For more information and an example, see Example: - // Creating Alias Resource Record Sets (http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html) - // in the Amazon Route 53 API Reference. + // in a different hosted zone.) HostedZoneId *string `type:"string" required:"true"` } @@ -2707,7 +3748,9 @@ type CreateTrafficPolicyInput struct { // Any comments that you want to include about the traffic policy. Comment *string `type:"string"` - // The definition of this traffic policy in JSON format. + // The definition of this traffic policy in JSON format. For more information, + // see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html) + // in the Amazon Route 53 API Reference. Document *string `type:"string" required:"true"` // The name of the traffic policy. @@ -2858,7 +3901,9 @@ type CreateTrafficPolicyVersionInput struct { // The definition of a new traffic policy version, in JSON format. You must // specify the full definition of the new traffic policy. You cannot specify - // just the differences between the new version and a previous version. + // just the differences between the new version and a previous version. For + // more information, see Traffic Policy Document Format (http://docs.aws.amazon.com/Route53/latest/APIReference/api-policies-traffic-policy-document-format.html) + // in the Amazon Route 53 API Reference. Document *string `type:"string" required:"true"` // The ID of the traffic policy for which you want to create a new version. @@ -4362,6 +5407,12 @@ type HostedZoneConfig struct { // XML document. Comment *string `type:"string"` + // GetHostedZone and ListHostedZone responses: A Boolean value that indicates + // whether a hosted zone is private. + // + // CreateHostedZone requests: When you're creating a private hosted zone (when + // you specify values for VPCId and VPCRegion), you can optionally specify true + // for PrivateZone. PrivateZone *bool `type:"boolean"` } @@ -4623,9 +5674,8 @@ type ListGeoLocationsOutput struct { // A flag that indicates whether there are more geo locations to be listed. // If your results were truncated, you can make a follow-up request for the - // next page of results by using the values included in the ListGeoLocationsResponse$NextContinentCode, - // ListGeoLocationsResponse$NextCountryCode and ListGeoLocationsResponse$NextSubdivisionCode - // elements. + // next page of results by using the values included in the NextContinentCode, + // NextCountryCode, and NextSubdivisionCode elements. // // Valid Values: true | false IsTruncated *bool `type:"boolean" required:"true"` @@ -4635,18 +5685,18 @@ type ListGeoLocationsOutput struct { MaxItems *string `type:"string" required:"true"` // If the results were truncated, the continent code of the next geo location - // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated - // is true and the next geo location to list is a continent location. + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is a continent location. NextContinentCode *string `min:"2" type:"string"` // If the results were truncated, the country code of the next geo location - // in the list. 
This element is present only if ListGeoLocationsResponse$IsTruncated - // is true and the next geo location to list is not a continent location. + // in the list. This element is present only if IsTruncated is true and the + // next geo location to list is not a continent location. NextCountryCode *string `min:"1" type:"string"` // If the results were truncated, the subdivision code of the next geo location - // in the list. This element is present only if ListGeoLocationsResponse$IsTruncated - // is true and the next geo location has a subdivision. + // in the list. This element is present only if IsTruncated is true and the + // next geo location has a subdivision. NextSubdivisionCode *string `min:"1" type:"string"` } @@ -4714,14 +5764,14 @@ type ListHealthChecksOutput struct { // The maximum number of health checks to be included in the response body. // If the number of health checks associated with this AWS account exceeds MaxItems, - // the value of ListHealthChecksResponse$IsTruncated in the response is true. - // Call ListHealthChecks again and specify the value of ListHealthChecksResponse$NextMarker - // in the ListHostedZonesRequest$Marker element to get the next page of results. + // the value of IsTruncated in the response is true. Call ListHealthChecks again + // and specify the value of NextMarker from the last response in the Marker + // element of the next request to get the next page of results. MaxItems *string `type:"string" required:"true"` - // Indicates where to continue listing health checks. If ListHealthChecksResponse$IsTruncated - // is true, make another request to ListHealthChecks and include the value of - // the NextMarker element in the Marker element to get the next page of results. + // Indicates where to continue listing health checks. If IsTruncated is true, + // make another request to ListHealthChecks and include the value of the NextMarker + // element in the Marker element to get the next page of results. NextMarker *string `type:"string"` } @@ -4743,10 +5793,6 @@ func (s ListHealthChecksOutput) GoString() string { // of the page that is displayed by using the MaxItems parameter. You can use // the DNSName and HostedZoneId parameters to control the hosted zone that the // list begins with. -// -// For more information about listing hosted zones, see Listing the Hosted -// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) -// in the Amazon Route 53 Developer Guide. type ListHostedZonesByNameInput struct { _ struct{} `type:"structure"` @@ -4800,24 +5846,23 @@ type ListHostedZonesByNameOutput struct { // The maximum number of hosted zones to be included in the response body. If // the number of hosted zones associated with this AWS account exceeds MaxItems, - // the value of ListHostedZonesByNameResponse$IsTruncated in the response is - // true. Call ListHostedZonesByName again and specify the value of ListHostedZonesByNameResponse$NextDNSName - // and ListHostedZonesByNameResponse$NextHostedZoneId elements respectively - // to get the next page of results. + // the value of IsTruncated in the ListHostedZonesByNameResponse is true. Call + // ListHostedZonesByName again and specify the value of NextDNSName and NextHostedZoneId + // elements from the previous response to get the next page of results. MaxItems *string `type:"string" required:"true"` - // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted - // zones associated with the current AWS account. 
To get the next page of results, - // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName - // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId - // in the ListHostedZonesByNameRequest$HostedZoneId element. + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. NextDNSName *string `type:"string"` - // If ListHostedZonesByNameResponse$IsTruncated is true, there are more hosted - // zones associated with the current AWS account. To get the next page of results, - // make another request to ListHostedZonesByName. Specify the value of ListHostedZonesByNameResponse$NextDNSName - // in the ListHostedZonesByNameRequest$DNSName element and ListHostedZonesByNameResponse$NextHostedZoneId - // in the ListHostedZonesByNameRequest$HostedZoneId element. + // If the value of IsTruncated in the ListHostedZonesByNameResponse is true, + // there are more hosted zones associated with the current AWS account. To get + // the next page of results, make another request to ListHostedZonesByName. + // Specify the value of NextDNSName in the DNSName parameter. Specify NextHostedZoneId + // in the HostedZoneId parameter. NextHostedZoneId *string `type:"string"` } @@ -4837,12 +5882,10 @@ func (s ListHostedZonesByNameOutput) GoString() string { // the list of hosted zones is displayed on a single page. You can control the // length of the page that is displayed by using the MaxItems parameter. You // can use the Marker parameter to control the hosted zone that the list begins -// with. For more information about listing hosted zones, see Listing the Hosted -// Zones for an AWS Account (http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/ListInfoOnHostedZone.html) -// in the Amazon Route 53 Developer Guide. +// with. // -// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to -// a value greater than 100, Amazon Route 53 returns only the first 100. +// Amazon Route 53 returns a maximum of 100 items. If you set MaxItems to a +// value greater than 100, Amazon Route 53 returns only the first 100. type ListHostedZonesInput struct { _ struct{} `type:"structure"` @@ -4889,14 +5932,14 @@ type ListHostedZonesOutput struct { // The maximum number of hosted zones to be included in the response body. If // the number of hosted zones associated with this AWS account exceeds MaxItems, - // the value of ListHostedZonesResponse$IsTruncated in the response is true. - // Call ListHostedZones again and specify the value of ListHostedZonesResponse$NextMarker - // in the ListHostedZonesRequest$Marker element to get the next page of results. + // the value of IsTruncated in the response is true. Call ListHostedZones again + // and specify the value of NextMarker in the Marker parameter to get the next + // page of results. MaxItems *string `type:"string" required:"true"` - // Indicates where to continue listing hosted zones. If ListHostedZonesResponse$IsTruncated - // is true, make another request to ListHostedZones and include the value of - // the NextMarker element in the Marker element to get the next page of results. + // Indicates where to continue listing hosted zones. 
If IsTruncated is true, + // make another request to ListHostedZones and include the value of the NextMarker + // element in the Marker element to get the next page of results. NextMarker *string `type:"string"` } @@ -4922,9 +5965,9 @@ type ListResourceRecordSetsInput struct { MaxItems *string `location:"querystring" locationName:"maxitems" type:"string"` // Weighted resource record sets only: If results were truncated for a given - // DNS name and type, specify the value of ListResourceRecordSetsResponse$NextRecordIdentifier - // from the previous response to get the next resource record set that has the - // current DNS name and type. + // DNS name and type, specify the value of NextRecordIdentifier from the previous + // response to get the next resource record set that has the current DNS name + // and type. StartRecordIdentifier *string `location:"querystring" locationName:"identifier" min:"1" type:"string"` // The first name in the lexicographic ordering of domain names that you want @@ -4979,8 +6022,7 @@ type ListResourceRecordSetsOutput struct { // A flag that indicates whether there are more resource record sets to be listed. // If your results were truncated, you can make a follow-up request for the - // next page of results by using the ListResourceRecordSetsResponse$NextRecordName - // element. + // next page of results by using the NextRecordName element. // // Valid Values: true | false IsTruncated *bool `type:"boolean" required:"true"` @@ -4995,13 +6037,11 @@ type ListResourceRecordSetsOutput struct { NextRecordIdentifier *string `min:"1" type:"string"` // If the results were truncated, the name of the next record in the list. This - // element is present only if ListResourceRecordSetsResponse$IsTruncated is - // true. + // element is present only if IsTruncated is true. NextRecordName *string `type:"string"` // If the results were truncated, the type of the next record in the list. This - // element is present only if ListResourceRecordSetsResponse$IsTruncated is - // true. + // element is present only if IsTruncated is true. NextRecordType *string `type:"string" enum:"RRType"` // A complex type that contains information about the resource record sets that @@ -5074,16 +6114,16 @@ type ListReusableDelegationSetsOutput struct { // The maximum number of reusable delegation sets to be included in the response // body. If the number of reusable delegation sets associated with this AWS - // account exceeds MaxItems, the value of ListReusablDelegationSetsResponse$IsTruncated - // in the response is true. Call ListReusableDelegationSets again and specify - // the value of ListReusableDelegationSetsResponse$NextMarker in the ListReusableDelegationSetsRequest$Marker - // element to get the next page of results. + // account exceeds MaxItems, the value of IsTruncated in the response is true. + // To get the next page of results, call ListReusableDelegationSets again and + // specify the value of NextMarker from the previous response in the Marker + // element of the request. MaxItems *string `type:"string" required:"true"` - // Indicates where to continue listing reusable delegation sets. If ListReusableDelegationSetsResponse$IsTruncated + // Indicates where to continue listing reusable delegation sets. If IsTruncated // is true, make another request to ListReusableDelegationSets and include the - // value of the NextMarker element in the Marker element to get the next page - // of results. 
+ // value of the NextMarker element in the Marker element of the previous response + // to get the next page of results. NextMarker *string `type:"string"` } @@ -5917,13 +6957,14 @@ type ResourceRecordSet struct { // The cache time to live for the current resource record set. Note the following: // - // If you're creating an alias resource record set, omit TTL. Amazon Route - // 53 uses the value of TTL for the alias target. If you're associating this - // resource record set with a health check (if you're adding a HealthCheckId - // element), we recommend that you specify a TTL of 60 seconds or less so clients - // respond quickly to changes in health status. All of the resource record sets - // in a group of weighted, latency, geolocation, or failover resource record - // sets must have the same value for TTL. If a group of weighted resource record + // If you're creating a non-alias resource record set, TTL is required. If + // you're creating an alias resource record set, omit TTL. Amazon Route 53 uses + // the value of TTL for the alias target. If you're associating this resource + // record set with a health check (if you're adding a HealthCheckId element), + // we recommend that you specify a TTL of 60 seconds or less so clients respond + // quickly to changes in health status. All of the resource record sets in a + // group of weighted, latency, geolocation, or failover resource record sets + // must have the same value for TTL. If a group of weighted resource record // sets includes one or more weighted alias resource record sets for which the // alias target is an ELB load balancer, we recommend that you specify a TTL // of 60 seconds for all of the non-alias weighted resource record sets that @@ -6737,6 +7778,8 @@ const ( ResourceRecordSetRegionSaEast1 = "sa-east-1" // @enum ResourceRecordSetRegion ResourceRecordSetRegionCnNorth1 = "cn-north-1" + // @enum ResourceRecordSetRegion + ResourceRecordSetRegionApSouth1 = "ap-south-1" ) const ( @@ -6775,6 +7818,8 @@ const ( // @enum VPCRegion VPCRegionApSoutheast2 = "ap-southeast-2" // @enum VPCRegion + VPCRegionApSouth1 = "ap-south-1" + // @enum VPCRegion VPCRegionApNortheast1 = "ap-northeast-1" // @enum VPCRegion VPCRegionApNortheast2 = "ap-northeast-2" diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go index f4a82bde5..269c4db36 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Route53 is a client for Route 53. 
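A minimal sketch of the request-object pattern that the expanded comments above describe, injecting a custom handler into the request lifecycle before calling Send; the client construction and the logging handler are illustrative assumptions, not part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/route53"
)

func main() {
	svc := route53.New(session.New())

	// Build the request object without sending it.
	req, resp := svc.ListHostedZonesRequest(&route53.ListHostedZonesInput{})

	// Inject custom logic into the request lifecycle before Send is called.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		log.Printf("sending %s to %s", r.Operation.Name, r.HTTPRequest.URL)
	})

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // resp is now filled
}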
@@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index f46df025e..5132954f3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -16,7 +16,28 @@ import ( const opAbortMultipartUpload = "AbortMultipartUpload" -// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. +// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { op := &request.Operation{ Name: opAbortMultipartUpload, @@ -47,7 +68,28 @@ func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMulti const opCompleteMultipartUpload = "CompleteMultipartUpload" -// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. 
+// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { op := &request.Operation{ Name: opCompleteMultipartUpload, @@ -74,7 +116,28 @@ func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*Comp const opCopyObject = "CopyObject" -// CopyObjectRequest generates a request for the CopyObject operation. +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. +// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { op := &request.Operation{ Name: opCopyObject, @@ -101,7 +164,28 @@ func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { const opCreateBucket = "CreateBucket" -// CreateBucketRequest generates a request for the CreateBucket operation. +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. +// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { op := &request.Operation{ Name: opCreateBucket, @@ -128,7 +212,28 @@ func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) const opCreateMultipartUpload = "CreateMultipartUpload" -// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. +// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { op := &request.Operation{ Name: opCreateMultipartUpload, @@ -161,7 +266,28 @@ func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMu const opDeleteBucket = "DeleteBucket" -// DeleteBucketRequest generates a request for the DeleteBucket operation. +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. +// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { op := &request.Operation{ Name: opDeleteBucket, @@ -191,7 +317,28 @@ func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) const opDeleteBucketCors = "DeleteBucketCors" -// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation. +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. 
+// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { op := &request.Operation{ Name: opDeleteBucketCors, @@ -220,7 +367,28 @@ func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOu const opDeleteBucketLifecycle = "DeleteBucketLifecycle" -// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { op := &request.Operation{ Name: opDeleteBucketLifecycle, @@ -249,7 +417,28 @@ func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBu const opDeleteBucketPolicy = "DeleteBucketPolicy" -// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { op := &request.Operation{ Name: opDeleteBucketPolicy, @@ -278,7 +467,28 @@ func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPo const opDeleteBucketReplication = "DeleteBucketReplication" -// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. 
+// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. +// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { op := &request.Operation{ Name: opDeleteBucketReplication, @@ -307,7 +517,28 @@ func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*Dele const opDeleteBucketTagging = "DeleteBucketTagging" -// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. +// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { op := &request.Operation{ Name: opDeleteBucketTagging, @@ -336,7 +567,28 @@ func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucket const opDeleteBucketWebsite = "DeleteBucketWebsite" -// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. 
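For contrast, the "call the method directly instead" path that these comments keep referring to is the usual one-shot form. A small sketch using DeleteBucketWebsite, whose direct method appears in the hunk context below; the bucket name and error handling are illustrative assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New())

	// One-shot call: the SDK builds the request and calls Send internally.
	out, err := svc.DeleteBucketWebsite(&s3.DeleteBucketWebsiteInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}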
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. +// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { op := &request.Operation{ Name: opDeleteBucketWebsite, @@ -365,7 +617,28 @@ func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucket const opDeleteObject = "DeleteObject" -// DeleteObjectRequest generates a request for the DeleteObject operation. +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. +// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { op := &request.Operation{ Name: opDeleteObject, @@ -394,7 +667,28 @@ func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) const opDeleteObjects = "DeleteObjects" -// DeleteObjectsRequest generates a request for the DeleteObjects operation. +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. +// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { op := &request.Operation{ Name: opDeleteObjects, @@ -422,7 +716,28 @@ func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, err const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" -// GetBucketAccelerateConfigurationRequest generates a request for the GetBucketAccelerateConfiguration operation. 
+// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. +// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opGetBucketAccelerateConfiguration, @@ -449,7 +764,28 @@ func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigur const opGetBucketAcl = "GetBucketAcl" -// GetBucketAclRequest generates a request for the GetBucketAcl operation. +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. +// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { op := &request.Operation{ Name: opGetBucketAcl, @@ -476,7 +812,28 @@ func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) const opGetBucketCors = "GetBucketCors" -// GetBucketCorsRequest generates a request for the GetBucketCors operation. +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetBucketCorsRequest method. +// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { op := &request.Operation{ Name: opGetBucketCors, @@ -503,7 +860,28 @@ func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, err const opGetBucketLifecycle = "GetBucketLifecycle" -// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. +// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") @@ -533,7 +911,28 @@ func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifec const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" -// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation. +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
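The deprecated operations touched in this revision (GetBucketLifecycle here, PutBucketLifecycle and PutBucketNotification further down) only emit their deprecation warning when the client config has a logger, per the `c.Client.Config.Logger != nil` guard in the hunks. A sketch of how a caller would surface that warning, assuming the SDK's default logger is acceptable; the bucket name is hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// With a logger configured, the request constructor logs
	// "This operation, GetBucketLifecycle, has been deprecated".
	cfg := aws.NewConfig().WithLogger(aws.NewDefaultLogger())
	svc := s3.New(session.New(cfg))

	_, err := svc.GetBucketLifecycle(&s3.GetBucketLifecycleInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket
	})
	if err != nil {
		log.Println(err)
	}
}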
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opGetBucketLifecycleConfiguration, @@ -560,7 +959,28 @@ func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurat const opGetBucketLocation = "GetBucketLocation" -// GetBucketLocationRequest generates a request for the GetBucketLocation operation. +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. +// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { op := &request.Operation{ Name: opGetBucketLocation, @@ -587,7 +1007,28 @@ func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocatio const opGetBucketLogging = "GetBucketLogging" -// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. +// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { op := &request.Operation{ Name: opGetBucketLogging, @@ -615,7 +1056,28 @@ func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOu const opGetBucketNotification = "GetBucketNotification" -// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. +// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") @@ -645,7 +1107,28 @@ func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" -// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. +// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { op := &request.Operation{ Name: opGetBucketNotificationConfiguration, @@ -672,7 +1155,28 @@ func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConf const opGetBucketPolicy = "GetBucketPolicy" -// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. +// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { op := &request.Operation{ Name: opGetBucketPolicy, @@ -699,7 +1203,28 @@ func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutpu const opGetBucketReplication = "GetBucketReplication" -// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. +// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { op := &request.Operation{ Name: opGetBucketReplication, @@ -726,7 +1251,28 @@ func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketR const opGetBucketRequestPayment = "GetBucketRequestPayment" -// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. 
+// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { op := &request.Operation{ Name: opGetBucketRequestPayment, @@ -753,7 +1299,28 @@ func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetB const opGetBucketTagging = "GetBucketTagging" -// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. +// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { op := &request.Operation{ Name: opGetBucketTagging, @@ -780,7 +1347,28 @@ func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOu const opGetBucketVersioning = "GetBucketVersioning" -// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. +// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { op := &request.Operation{ Name: opGetBucketVersioning, @@ -807,7 +1395,28 @@ func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVer const opGetBucketWebsite = "GetBucketWebsite" -// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. +// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { op := &request.Operation{ Name: opGetBucketWebsite, @@ -834,7 +1443,28 @@ func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOu const opGetObject = "GetObject" -// GetObjectRequest generates a request for the GetObject operation. +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. +// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { op := &request.Operation{ Name: opGetObject, @@ -861,7 +1491,28 @@ func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { const opGetObjectAcl = "GetObjectAcl" -// GetObjectAclRequest generates a request for the GetObjectAcl operation. +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. 
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { op := &request.Operation{ Name: opGetObjectAcl, @@ -888,7 +1539,28 @@ func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) const opGetObjectTorrent = "GetObjectTorrent" -// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. +// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { op := &request.Operation{ Name: opGetObjectTorrent, @@ -915,7 +1587,28 @@ func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOu const opHeadBucket = "HeadBucket" -// HeadBucketRequest generates a request for the HeadBucket operation. +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. +// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { op := &request.Operation{ Name: opHeadBucket, @@ -945,7 +1638,28 @@ func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { const opHeadObject = "HeadObject" -// HeadObjectRequest generates a request for the HeadObject operation. +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. +// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { op := &request.Operation{ Name: opHeadObject, @@ -974,7 +1688,28 @@ func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { const opListBuckets = "ListBuckets" -// ListBucketsRequest generates a request for the ListBuckets operation. +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. +// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { op := &request.Operation{ Name: opListBuckets, @@ -1001,7 +1736,28 @@ func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { const opListMultipartUploads = "ListMultipartUploads" -// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
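HeadObject is typically used as an existence and metadata probe. A sketch of the direct-call form with the usual aws-sdk-go error unwrapping via awserr; the bucket, key, and the treatment of a 404 response are illustrative assumptions rather than anything mandated by this change.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New())

	out, err := svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example/key.txt"),
	})
	if err != nil {
		// A 404 means the object (or bucket) does not exist.
		if aerr, ok := err.(awserr.RequestFailure); ok && aerr.StatusCode() == 404 {
			fmt.Println("object does not exist")
			return
		}
		log.Fatal(err)
	}
	fmt.Printf("size=%d etag=%s\n", aws.Int64Value(out.ContentLength), aws.StringValue(out.ETag))
}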
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { op := &request.Operation{ Name: opListMultipartUploads, @@ -1032,6 +1788,23 @@ func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultip return out, err } +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListMultipartUploadsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1042,7 +1815,28 @@ func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func const opListObjectVersions = "ListObjectVersions" -// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { op := &request.Operation{ Name: opListObjectVersions, @@ -1073,6 +1867,23 @@ func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVers return out, err } +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. 
+// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListObjectVersionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1083,7 +1894,28 @@ func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p * const opListObjects = "ListObjects" -// ListObjectsRequest generates a request for the ListObjects operation. +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { op := &request.Operation{ Name: opListObjects, @@ -1116,6 +1948,23 @@ func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { return out, err } +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. +// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListObjectsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1124,9 +1973,112 @@ func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOut }) } +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + req = c.newRequest(op, input, output) + output = &ListObjectsV2Output{} + req.Data = output + return +} + +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + err := req.Send() + return out, err +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error { + page, _ := c.ListObjectsV2Request(input) + page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) + return page.EachPage(func(p interface{}, lastPage bool) bool { + return fn(p.(*ListObjectsV2Output), lastPage) + }) +} + const opListParts = "ListParts" -// ListPartsRequest generates a request for the ListParts operation. +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. 
+// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { op := &request.Operation{ Name: opListParts, @@ -1157,6 +2109,23 @@ func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { return out, err } +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. +// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPartsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -1167,7 +2136,28 @@ func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, l const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" -// PutBucketAccelerateConfigurationRequest generates a request for the PutBucketAccelerateConfiguration operation. +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { op := &request.Operation{ Name: opPutBucketAccelerateConfiguration, @@ -1196,7 +2186,28 @@ func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigur const opPutBucketAcl = "PutBucketAcl" -// PutBucketAclRequest generates a request for the PutBucketAcl operation. +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { op := &request.Operation{ Name: opPutBucketAcl, @@ -1225,7 +2236,28 @@ func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) const opPutBucketCors = "PutBucketCors" -// PutBucketCorsRequest generates a request for the PutBucketCors operation. +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { op := &request.Operation{ Name: opPutBucketCors, @@ -1254,7 +2286,28 @@ func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, err const opPutBucketLifecycle = "PutBucketLifecycle" -// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. 
+// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") @@ -1286,7 +2339,28 @@ func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifec const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" -// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation. +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { op := &request.Operation{ Name: opPutBucketLifecycleConfiguration, @@ -1316,7 +2390,28 @@ func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurat const opPutBucketLogging = "PutBucketLogging" -// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. 
+// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { op := &request.Operation{ Name: opPutBucketLogging, @@ -1347,7 +2442,28 @@ func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOu const opPutBucketNotification = "PutBucketNotification" -// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { if c.Client.Config.Logger != nil { c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") @@ -1379,7 +2495,28 @@ func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucke const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" -// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. 
+// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { op := &request.Operation{ Name: opPutBucketNotificationConfiguration, @@ -1408,7 +2545,28 @@ func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConf const opPutBucketPolicy = "PutBucketPolicy" -// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { op := &request.Operation{ Name: opPutBucketPolicy, @@ -1438,7 +2596,28 @@ func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutpu const opPutBucketReplication = "PutBucketReplication" -// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { op := &request.Operation{ Name: opPutBucketReplication, @@ -1468,7 +2647,28 @@ func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketR const opPutBucketRequestPayment = "PutBucketRequestPayment" -// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. 
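The XxxRequest form documented throughout these hunks returns a request.Request whose handler lists can be modified before Send is called. A hedged sketch of that pattern using PutBucketAclRequest; the bucket name and region are placeholders, not values from the patch.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	// Build the request without sending it, so custom logic can be injected
	// into its lifecycle before Send executes it.
	req, resp := svc.PutBucketAclRequest(&s3.PutBucketAclInput{
		Bucket: aws.String("example-bucket"), // placeholder
		ACL:    aws.String("private"),
	})

	// Log the operation name just before the HTTP request goes out.
	req.Handlers.Send.PushFront(func(r *request.Request) {
		fmt.Println("sending", r.Operation.Name)
	})

	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp) // resp is now filled
}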
+// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { op := &request.Operation{ Name: opPutBucketRequestPayment, @@ -1501,7 +2701,28 @@ func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutB const opPutBucketTagging = "PutBucketTagging" -// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { op := &request.Operation{ Name: opPutBucketTagging, @@ -1530,7 +2751,28 @@ func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOu const opPutBucketVersioning = "PutBucketVersioning" -// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { op := &request.Operation{ Name: opPutBucketVersioning, @@ -1560,7 +2802,28 @@ func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVer const opPutBucketWebsite = "PutBucketWebsite" -// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { op := &request.Operation{ Name: opPutBucketWebsite, @@ -1589,7 +2852,28 @@ func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOu const opPutObject = "PutObject" -// PutObjectRequest generates a request for the PutObject operation. +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { op := &request.Operation{ Name: opPutObject, @@ -1616,7 +2900,28 @@ func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { const opPutObjectAcl = "PutObjectAcl" -// PutObjectAclRequest generates a request for the PutObjectAcl operation. +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
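PutObject, documented above, also has the plain form that builds and sends the request in one call. A minimal sketch, assuming a configured session; the bucket, key, and body contents are placeholders.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	body := []byte("hello from the sketch") // placeholder payload

	// Call PutObject directly when only the service response is needed.
	out, err := svc.PutObject(&s3.PutObjectInput{
		Bucket:        aws.String("example-bucket"), // placeholder
		Key:           aws.String("example-key"),    // placeholder
		Body:          bytes.NewReader(body),
		ContentLength: aws.Int64(int64(len(body))),
		ContentType:   aws.String("text/plain"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}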
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { op := &request.Operation{ Name: opPutObjectAcl, @@ -1644,7 +2949,28 @@ func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) const opRestoreObject = "RestoreObject" -// RestoreObjectRequest generates a request for the RestoreObject operation. +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { op := &request.Operation{ Name: opRestoreObject, @@ -1671,7 +2997,28 @@ func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, err const opUploadPart = "UploadPart" -// UploadPartRequest generates a request for the UploadPart operation. +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPart method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartRequest method. 
+// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { op := &request.Operation{ Name: opUploadPart, @@ -1704,7 +3051,28 @@ func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { const opUploadPartCopy = "UploadPartCopy" -// UploadPartCopyRequest generates a request for the UploadPartCopy operation. +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPartCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartCopyRequest method. +// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { op := &request.Operation{ Name: opUploadPartCopy, @@ -4218,7 +5586,7 @@ type GetObjectOutput struct { ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // The portion of the object returned in the response. ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` @@ -4587,7 +5955,7 @@ type HeadObjectOutput struct { ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -5253,6 +6621,124 @@ func (s ListObjectsOutput) GoString() string { return s.String() } +type ListObjectsV2Input struct { + _ struct{} `type:"structure"` + + // Name of the bucket to list. + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. 
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by delimiter + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + type ListPartsInput struct { _ struct{} `type:"structure"` @@ -5457,8 +6943,8 @@ type NoncurrentVersionExpiration struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` } @@ -5483,8 +6969,8 @@ type NoncurrentVersionTransition struct { // Specifies the number of days an object is noncurrent before Amazon S3 can // perform the associated action. For information about the noncurrent days // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) in + // the Amazon Simple Storage Service Developer Guide. NoncurrentDays *int64 `type:"integer"` // The class of storage used to store the object. @@ -6609,7 +8095,7 @@ type PutObjectInput struct { // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // A standard MIME type describing the format of the object data. ContentType *string `location:"header" locationName:"Content-Type" type:"string"` @@ -7601,7 +9087,7 @@ type UploadPartInput struct { // Size of the body in bytes. This parameter is useful when the size of the // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` // Object key for which the multipart upload was initiated. 
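The ListObjectsV2Input and ListObjectsV2Output structs added above describe V2 listing with continuation tokens. A hedged sketch of a manual continuation loop follows; it assumes the corresponding ListObjectsV2 client method is added elsewhere in this patch, and the bucket name and region are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	input := &s3.ListObjectsV2Input{
		Bucket:     aws.String("example-bucket"), // placeholder
		MaxKeys:    aws.Int64(100),
		FetchOwner: aws.Bool(true),
	}

	// Follow NextContinuationToken until IsTruncated is false, the manual
	// equivalent of the pagination described by the struct fields above.
	for {
		out, err := svc.ListObjectsV2(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, obj := range out.Contents {
			fmt.Println(aws.StringValue(obj.Key))
		}
		if !aws.BoolValue(out.IsTruncated) {
			break
		}
		input.ContinuationToken = out.NextContinuationToken
	}
}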
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` @@ -7817,6 +9303,8 @@ const ( // @enum BucketLocationConstraint BucketLocationConstraintUsWest2 = "us-west-2" // @enum BucketLocationConstraint + BucketLocationConstraintApSouth1 = "ap-south-1" + // @enum BucketLocationConstraint BucketLocationConstraintApSoutheast1 = "ap-southeast-1" // @enum BucketLocationConstraint BucketLocationConstraintApSoutheast2 = "ap-southeast-2" diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go index cf01da535..5833952a2 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/restxml" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // S3 is a client for Amazon S3. @@ -58,7 +58,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go index 814640917..bf5b3fb76 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -4,6 +4,7 @@ package ses import ( + "fmt" "time" "github.com/aws/aws-sdk-go/aws/awsutil" @@ -14,7 +15,28 @@ import ( const opCloneReceiptRuleSet = "CloneReceiptRuleSet" -// CloneReceiptRuleSetRequest generates a request for the CloneReceiptRuleSet operation. +// CloneReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CloneReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CloneReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CloneReceiptRuleSetRequest method. +// req, resp := client.CloneReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) CloneReceiptRuleSetRequest(input *CloneReceiptRuleSetInput) (req *request.Request, output *CloneReceiptRuleSetOutput) { op := &request.Operation{ Name: opCloneReceiptRuleSet, @@ -48,7 +70,28 @@ func (c *SES) CloneReceiptRuleSet(input *CloneReceiptRuleSetInput) (*CloneReceip const opCreateReceiptFilter = "CreateReceiptFilter" -// CreateReceiptFilterRequest generates a request for the CreateReceiptFilter operation. +// CreateReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptFilter operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReceiptFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptFilterRequest method. +// req, resp := client.CreateReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) CreateReceiptFilterRequest(input *CreateReceiptFilterInput) (req *request.Request, output *CreateReceiptFilterOutput) { op := &request.Operation{ Name: opCreateReceiptFilter, @@ -80,7 +123,28 @@ func (c *SES) CreateReceiptFilter(input *CreateReceiptFilterInput) (*CreateRecei const opCreateReceiptRule = "CreateReceiptRule" -// CreateReceiptRuleRequest generates a request for the CreateReceiptRule operation. +// CreateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptRuleRequest method. +// req, resp := client.CreateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) CreateReceiptRuleRequest(input *CreateReceiptRuleInput) (req *request.Request, output *CreateReceiptRuleOutput) { op := &request.Operation{ Name: opCreateReceiptRule, @@ -112,7 +176,28 @@ func (c *SES) CreateReceiptRule(input *CreateReceiptRuleInput) (*CreateReceiptRu const opCreateReceiptRuleSet = "CreateReceiptRuleSet" -// CreateReceiptRuleSetRequest generates a request for the CreateReceiptRuleSet operation. +// CreateReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the CreateReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateReceiptRuleSetRequest method. 
+// req, resp := client.CreateReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) CreateReceiptRuleSetRequest(input *CreateReceiptRuleSetInput) (req *request.Request, output *CreateReceiptRuleSetOutput) { op := &request.Operation{ Name: opCreateReceiptRuleSet, @@ -144,7 +229,28 @@ func (c *SES) CreateReceiptRuleSet(input *CreateReceiptRuleSetInput) (*CreateRec const opDeleteIdentity = "DeleteIdentity" -// DeleteIdentityRequest generates a request for the DeleteIdentity operation. +// DeleteIdentityRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentityRequest method. +// req, resp := client.DeleteIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Request, output *DeleteIdentityOutput) { op := &request.Operation{ Name: opDeleteIdentity, @@ -162,8 +268,8 @@ func (c *SES) DeleteIdentityRequest(input *DeleteIdentityInput) (req *request.Re return } -// Deletes the specified identity (email address or domain) from the list of -// verified identities. +// Deletes the specified identity (an email address or a domain) from the list +// of verified identities. // // This action is throttled at one request per second. func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, error) { @@ -174,7 +280,28 @@ func (c *SES) DeleteIdentity(input *DeleteIdentityInput) (*DeleteIdentityOutput, const opDeleteIdentityPolicy = "DeleteIdentityPolicy" -// DeleteIdentityPolicyRequest generates a request for the DeleteIdentityPolicy operation. +// DeleteIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteIdentityPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteIdentityPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteIdentityPolicyRequest method. 
+// req, resp := client.DeleteIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req *request.Request, output *DeleteIdentityPolicyOutput) { op := &request.Operation{ Name: opDeleteIdentityPolicy, @@ -193,13 +320,15 @@ func (c *SES) DeleteIdentityPolicyRequest(input *DeleteIdentityPolicyInput) (req } // Deletes the specified sending authorization policy for the given identity -// (email address or domain). This API returns successfully even if a policy +// (an email address or a domain). This API returns successfully even if a policy // with the specified name does not exist. // -// This API is for the identity owner only. If you have not verified the identity, -// this API will return an error. Sending authorization is a feature that enables -// an identity owner to authorize other senders to use its identities. For information -// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // This action is throttled at one request per second. func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIdentityPolicyOutput, error) { @@ -210,7 +339,28 @@ func (c *SES) DeleteIdentityPolicy(input *DeleteIdentityPolicyInput) (*DeleteIde const opDeleteReceiptFilter = "DeleteReceiptFilter" -// DeleteReceiptFilterRequest generates a request for the DeleteReceiptFilter operation. +// DeleteReceiptFilterRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptFilter operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptFilter method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptFilterRequest method. +// req, resp := client.DeleteReceiptFilterRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteReceiptFilterRequest(input *DeleteReceiptFilterInput) (req *request.Request, output *DeleteReceiptFilterOutput) { op := &request.Operation{ Name: opDeleteReceiptFilter, @@ -242,7 +392,28 @@ func (c *SES) DeleteReceiptFilter(input *DeleteReceiptFilterInput) (*DeleteRecei const opDeleteReceiptRule = "DeleteReceiptRule" -// DeleteReceiptRuleRequest generates a request for the DeleteReceiptRule operation. +// DeleteReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRule operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptRuleRequest method. +// req, resp := client.DeleteReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteReceiptRuleRequest(input *DeleteReceiptRuleInput) (req *request.Request, output *DeleteReceiptRuleOutput) { op := &request.Operation{ Name: opDeleteReceiptRule, @@ -274,7 +445,28 @@ func (c *SES) DeleteReceiptRule(input *DeleteReceiptRuleInput) (*DeleteReceiptRu const opDeleteReceiptRuleSet = "DeleteReceiptRuleSet" -// DeleteReceiptRuleSetRequest generates a request for the DeleteReceiptRuleSet operation. +// DeleteReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DeleteReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteReceiptRuleSetRequest method. +// req, resp := client.DeleteReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req *request.Request, output *DeleteReceiptRuleSetOutput) { op := &request.Operation{ Name: opDeleteReceiptRuleSet, @@ -294,8 +486,10 @@ func (c *SES) DeleteReceiptRuleSetRequest(input *DeleteReceiptRuleSetInput) (req // Deletes the specified receipt rule set and all of the receipt rules it contains. // -// The currently active rule set cannot be deleted. For information about managing -// receipt rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// The currently active rule set cannot be deleted. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // This action is throttled at one request per second. func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteReceiptRuleSetOutput, error) { @@ -306,7 +500,28 @@ func (c *SES) DeleteReceiptRuleSet(input *DeleteReceiptRuleSetInput) (*DeleteRec const opDeleteVerifiedEmailAddress = "DeleteVerifiedEmailAddress" -// DeleteVerifiedEmailAddressRequest generates a request for the DeleteVerifiedEmailAddress operation. 
+// DeleteVerifiedEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the DeleteVerifiedEmailAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteVerifiedEmailAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteVerifiedEmailAddressRequest method. +// req, resp := client.DeleteVerifiedEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddressInput) (req *request.Request, output *DeleteVerifiedEmailAddressOutput) { op := &request.Operation{ Name: opDeleteVerifiedEmailAddress, @@ -328,9 +543,10 @@ func (c *SES) DeleteVerifiedEmailAddressRequest(input *DeleteVerifiedEmailAddres // Deletes the specified email address from the list of verified addresses. // -// The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 +// The DeleteVerifiedEmailAddress action is deprecated as of the May 15, 2012 // release of Domain Verification. The DeleteIdentity action is now preferred. -// This action is throttled at one request per second. +// +// This action is throttled at one request per second. func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) (*DeleteVerifiedEmailAddressOutput, error) { req, out := c.DeleteVerifiedEmailAddressRequest(input) err := req.Send() @@ -339,7 +555,28 @@ func (c *SES) DeleteVerifiedEmailAddress(input *DeleteVerifiedEmailAddressInput) const opDescribeActiveReceiptRuleSet = "DescribeActiveReceiptRuleSet" -// DescribeActiveReceiptRuleSetRequest generates a request for the DescribeActiveReceiptRuleSet operation. +// DescribeActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeActiveReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeActiveReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeActiveReceiptRuleSetRequest method. 
+// req, resp := client.DescribeActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DescribeActiveReceiptRuleSetRequest(input *DescribeActiveReceiptRuleSetInput) (req *request.Request, output *DescribeActiveReceiptRuleSetOutput) { op := &request.Operation{ Name: opDescribeActiveReceiptRuleSet, @@ -372,7 +609,28 @@ func (c *SES) DescribeActiveReceiptRuleSet(input *DescribeActiveReceiptRuleSetIn const opDescribeReceiptRule = "DescribeReceiptRule" -// DescribeReceiptRuleRequest generates a request for the DescribeReceiptRule operation. +// DescribeReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReceiptRuleRequest method. +// req, resp := client.DescribeReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DescribeReceiptRuleRequest(input *DescribeReceiptRuleInput) (req *request.Request, output *DescribeReceiptRuleOutput) { op := &request.Operation{ Name: opDescribeReceiptRule, @@ -404,7 +662,28 @@ func (c *SES) DescribeReceiptRule(input *DescribeReceiptRuleInput) (*DescribeRec const opDescribeReceiptRuleSet = "DescribeReceiptRuleSet" -// DescribeReceiptRuleSetRequest generates a request for the DescribeReceiptRuleSet operation. +// DescribeReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the DescribeReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeReceiptRuleSetRequest method. +// req, resp := client.DescribeReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) DescribeReceiptRuleSetRequest(input *DescribeReceiptRuleSetInput) (req *request.Request, output *DescribeReceiptRuleSetOutput) { op := &request.Operation{ Name: opDescribeReceiptRuleSet, @@ -436,7 +715,28 @@ func (c *SES) DescribeReceiptRuleSet(input *DescribeReceiptRuleSetInput) (*Descr const opGetIdentityDkimAttributes = "GetIdentityDkimAttributes" -// GetIdentityDkimAttributesRequest generates a request for the GetIdentityDkimAttributes operation. 
+// GetIdentityDkimAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityDkimAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityDkimAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityDkimAttributesRequest method. +// req, resp := client.GetIdentityDkimAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesInput) (req *request.Request, output *GetIdentityDkimAttributesOutput) { op := &request.Operation{ Name: opGetIdentityDkimAttributes, @@ -462,13 +762,17 @@ func (c *SES) GetIdentityDkimAttributesRequest(input *GetIdentityDkimAttributesI // This action takes a list of identities as input and returns the following // information for each: // -// Whether Easy DKIM signing is enabled or disabled. A set of DKIM tokens -// that represent the identity. If the identity is an email address, the tokens -// represent the domain of that address. Whether Amazon SES has successfully -// verified the DKIM tokens published in the domain's DNS. This information -// is only returned for domain name identities, not for email addresses. This -// action is throttled at one request per second and can only get DKIM attributes -// for up to 100 identities at a time. +// Whether Easy DKIM signing is enabled or disabled. +// +// A set of DKIM tokens that represent the identity. If the identity is an +// email address, the tokens represent the domain of that address. +// +// Whether Amazon SES has successfully verified the DKIM tokens published +// in the domain's DNS. This information is only returned for domain name identities, +// not for email addresses. +// +// This action is throttled at one request per second and can only get DKIM +// attributes for up to 100 identities at a time. // // For more information about creating DNS records using DKIM tokens, go to // the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim-dns-records.html). @@ -478,9 +782,82 @@ func (c *SES) GetIdentityDkimAttributes(input *GetIdentityDkimAttributesInput) ( return out, err } +const opGetIdentityMailFromDomainAttributes = "GetIdentityMailFromDomainAttributes" + +// GetIdentityMailFromDomainAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityMailFromDomainAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityMailFromDomainAttributes method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityMailFromDomainAttributesRequest method. +// req, resp := client.GetIdentityMailFromDomainAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) GetIdentityMailFromDomainAttributesRequest(input *GetIdentityMailFromDomainAttributesInput) (req *request.Request, output *GetIdentityMailFromDomainAttributesOutput) { + op := &request.Operation{ + Name: opGetIdentityMailFromDomainAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetIdentityMailFromDomainAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetIdentityMailFromDomainAttributesOutput{} + req.Data = output + return +} + +// Returns the custom MAIL FROM attributes for a list of identities (email addresses +// and/or domains). +// +// This action is throttled at one request per second and can only get custom +// MAIL FROM attributes for up to 100 identities at a time. +func (c *SES) GetIdentityMailFromDomainAttributes(input *GetIdentityMailFromDomainAttributesInput) (*GetIdentityMailFromDomainAttributesOutput, error) { + req, out := c.GetIdentityMailFromDomainAttributesRequest(input) + err := req.Send() + return out, err +} + const opGetIdentityNotificationAttributes = "GetIdentityNotificationAttributes" -// GetIdentityNotificationAttributesRequest generates a request for the GetIdentityNotificationAttributes operation. +// GetIdentityNotificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityNotificationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityNotificationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityNotificationAttributesRequest method. +// req, resp := client.GetIdentityNotificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetIdentityNotificationAttributesRequest(input *GetIdentityNotificationAttributesInput) (req *request.Request, output *GetIdentityNotificationAttributesOutput) { op := &request.Operation{ Name: opGetIdentityNotificationAttributes, @@ -514,7 +891,28 @@ func (c *SES) GetIdentityNotificationAttributes(input *GetIdentityNotificationAt const opGetIdentityPolicies = "GetIdentityPolicies" -// GetIdentityPoliciesRequest generates a request for the GetIdentityPolicies operation. +// GetIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
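GetIdentityMailFromDomainAttributes is one of the operations newly added by this patch. A minimal sketch of calling it; the input struct is assumed to expose an Identities list like the other SES Get* inputs, and the domain and region are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) // placeholder region
	if err != nil {
		log.Fatal(err)
	}
	svc := ses.New(sess)

	// Up to 100 identities may be queried per call, per the comment above.
	out, err := svc.GetIdentityMailFromDomainAttributes(&ses.GetIdentityMailFromDomainAttributesInput{
		Identities: []*string{aws.String("example.com")}, // placeholder identity
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out)
}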
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityPolicies method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetIdentityPoliciesRequest method. +// req, resp := client.GetIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req *request.Request, output *GetIdentityPoliciesOutput) { op := &request.Operation{ Name: opGetIdentityPolicies, @@ -533,13 +931,16 @@ func (c *SES) GetIdentityPoliciesRequest(input *GetIdentityPoliciesInput) (req * } // Returns the requested sending authorization policies for the given identity -// (email address or domain). The policies are returned as a map of policy names -// to policy contents. You can retrieve a maximum of 20 policies at a time. +// (an email address or a domain). The policies are returned as a map of policy +// names to policy contents. You can retrieve a maximum of 20 policies at a +// time. // -// This API is for the identity owner only. If you have not verified the identity, -// this API will return an error. Sending authorization is a feature that enables -// an identity owner to authorize other senders to use its identities. For information -// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // This action is throttled at one request per second. func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentityPoliciesOutput, error) { @@ -550,7 +951,28 @@ func (c *SES) GetIdentityPolicies(input *GetIdentityPoliciesInput) (*GetIdentity const opGetIdentityVerificationAttributes = "GetIdentityVerificationAttributes" -// GetIdentityVerificationAttributesRequest generates a request for the GetIdentityVerificationAttributes operation. +// GetIdentityVerificationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetIdentityVerificationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetIdentityVerificationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetIdentityVerificationAttributesRequest method. +// req, resp := client.GetIdentityVerificationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetIdentityVerificationAttributesRequest(input *GetIdentityVerificationAttributesInput) (req *request.Request, output *GetIdentityVerificationAttributesOutput) { op := &request.Operation{ Name: opGetIdentityVerificationAttributes, @@ -582,7 +1004,28 @@ func (c *SES) GetIdentityVerificationAttributes(input *GetIdentityVerificationAt const opGetSendQuota = "GetSendQuota" -// GetSendQuotaRequest generates a request for the GetSendQuota operation. +// GetSendQuotaRequest generates a "aws/request.Request" representing the +// client's request for the GetSendQuota operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSendQuota method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSendQuotaRequest method. +// req, resp := client.GetSendQuotaRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetSendQuotaRequest(input *GetSendQuotaInput) (req *request.Request, output *GetSendQuotaOutput) { op := &request.Operation{ Name: opGetSendQuota, @@ -611,7 +1054,28 @@ func (c *SES) GetSendQuota(input *GetSendQuotaInput) (*GetSendQuotaOutput, error const opGetSendStatistics = "GetSendStatistics" -// GetSendStatisticsRequest generates a request for the GetSendStatistics operation. +// GetSendStatisticsRequest generates a "aws/request.Request" representing the +// client's request for the GetSendStatistics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSendStatistics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSendStatisticsRequest method. +// req, resp := client.GetSendStatisticsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) GetSendStatisticsRequest(input *GetSendStatisticsInput) (req *request.Request, output *GetSendStatisticsOutput) { op := &request.Operation{ Name: opGetSendStatistics, @@ -643,7 +1107,28 @@ func (c *SES) GetSendStatistics(input *GetSendStatisticsInput) (*GetSendStatisti const opListIdentities = "ListIdentities" -// ListIdentitiesRequest generates a request for the ListIdentities operation. 
+// ListIdentitiesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentities operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentities method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentitiesRequest method. +// req, resp := client.ListIdentitiesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Request, output *ListIdentitiesOutput) { op := &request.Operation{ Name: opListIdentities, @@ -668,7 +1153,7 @@ func (c *SES) ListIdentitiesRequest(input *ListIdentitiesInput) (req *request.Re } // Returns a list containing all of the identities (email addresses and domains) -// for a specific AWS Account, regardless of verification status. +// for your AWS account, regardless of verification status. // // This action is throttled at one request per second. func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, error) { @@ -677,6 +1162,23 @@ func (c *SES) ListIdentities(input *ListIdentitiesInput) (*ListIdentitiesOutput, return out, err } +// ListIdentitiesPages iterates over the pages of a ListIdentities operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListIdentities method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListIdentities operation. +// pageNum := 0 +// err := client.ListIdentitiesPages(params, +// func(page *ListIdentitiesOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(p *ListIdentitiesOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListIdentitiesRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -687,7 +1189,28 @@ func (c *SES) ListIdentitiesPages(input *ListIdentitiesInput, fn func(p *ListIde const opListIdentityPolicies = "ListIdentityPolicies" -// ListIdentityPoliciesRequest generates a request for the ListIdentityPolicies operation. +// ListIdentityPoliciesRequest generates a "aws/request.Request" representing the +// client's request for the ListIdentityPolicies operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIdentityPolicies method directly +// instead. 
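The ListIdentitiesPages paginator shown above keeps fetching pages until the callback returns false. A small sketch of collecting identities across pages, assuming a configured *ses.SES client and that the output exposes an Identities slice (field name assumed):

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// collectIdentities gathers at most max identities, page by page.
func collectIdentities(svc *ses.SES, max int) ([]string, error) {
	var ids []string
	err := svc.ListIdentitiesPages(&ses.ListIdentitiesInput{},
		func(page *ses.ListIdentitiesOutput, lastPage bool) bool {
			for _, id := range page.Identities {
				ids = append(ids, aws.StringValue(id))
			}
			return len(ids) < max // returning false stops the pagination early
		})
	return ids, err
}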
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIdentityPoliciesRequest method. +// req, resp := client.ListIdentityPoliciesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req *request.Request, output *ListIdentityPoliciesOutput) { op := &request.Operation{ Name: opListIdentityPolicies, @@ -706,13 +1229,15 @@ func (c *SES) ListIdentityPoliciesRequest(input *ListIdentityPoliciesInput) (req } // Returns a list of sending authorization policies that are attached to the -// given identity (email address or domain). This API returns only a list. If -// you want the actual policy content, you can use GetIdentityPolicies. +// given identity (an email address or a domain). This API returns only a list. +// If you want the actual policy content, you can use GetIdentityPolicies. // -// This API is for the identity owner only. If you have not verified the identity, -// this API will return an error. Sending authorization is a feature that enables -// an identity owner to authorize other senders to use its identities. For information -// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // This action is throttled at one request per second. func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdentityPoliciesOutput, error) { @@ -723,7 +1248,28 @@ func (c *SES) ListIdentityPolicies(input *ListIdentityPoliciesInput) (*ListIdent const opListReceiptFilters = "ListReceiptFilters" -// ListReceiptFiltersRequest generates a request for the ListReceiptFilters operation. +// ListReceiptFiltersRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptFilters operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReceiptFilters method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReceiptFiltersRequest method. 
+// req, resp := client.ListReceiptFiltersRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *request.Request, output *ListReceiptFiltersOutput) { op := &request.Operation{ Name: opListReceiptFilters, @@ -741,7 +1287,7 @@ func (c *SES) ListReceiptFiltersRequest(input *ListReceiptFiltersInput) (req *re return } -// Lists the IP address filters associated with your account. +// Lists the IP address filters associated with your AWS account. // // For information about managing IP address filters, see the Amazon SES Developer // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-ip-filters.html). @@ -755,7 +1301,28 @@ func (c *SES) ListReceiptFilters(input *ListReceiptFiltersInput) (*ListReceiptFi const opListReceiptRuleSets = "ListReceiptRuleSets" -// ListReceiptRuleSetsRequest generates a request for the ListReceiptRuleSets operation. +// ListReceiptRuleSetsRequest generates a "aws/request.Request" representing the +// client's request for the ListReceiptRuleSets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListReceiptRuleSets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListReceiptRuleSetsRequest method. +// req, resp := client.ListReceiptRuleSetsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ListReceiptRuleSetsRequest(input *ListReceiptRuleSetsInput) (req *request.Request, output *ListReceiptRuleSetsOutput) { op := &request.Operation{ Name: opListReceiptRuleSets, @@ -790,7 +1357,28 @@ func (c *SES) ListReceiptRuleSets(input *ListReceiptRuleSetsInput) (*ListReceipt const opListVerifiedEmailAddresses = "ListVerifiedEmailAddresses" -// ListVerifiedEmailAddressesRequest generates a request for the ListVerifiedEmailAddresses operation. +// ListVerifiedEmailAddressesRequest generates a "aws/request.Request" representing the +// client's request for the ListVerifiedEmailAddresses operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListVerifiedEmailAddresses method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListVerifiedEmailAddressesRequest method. 
+// req, resp := client.ListVerifiedEmailAddressesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddressesInput) (req *request.Request, output *ListVerifiedEmailAddressesOutput) { op := &request.Operation{ Name: opListVerifiedEmailAddresses, @@ -810,9 +1398,10 @@ func (c *SES) ListVerifiedEmailAddressesRequest(input *ListVerifiedEmailAddresse // Returns a list containing all of the email addresses that have been verified. // -// The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 +// The ListVerifiedEmailAddresses action is deprecated as of the May 15, 2012 // release of Domain Verification. The ListIdentities action is now preferred. -// This action is throttled at one request per second. +// +// This action is throttled at one request per second. func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) (*ListVerifiedEmailAddressesOutput, error) { req, out := c.ListVerifiedEmailAddressesRequest(input) err := req.Send() @@ -821,7 +1410,28 @@ func (c *SES) ListVerifiedEmailAddresses(input *ListVerifiedEmailAddressesInput) const opPutIdentityPolicy = "PutIdentityPolicy" -// PutIdentityPolicyRequest generates a request for the PutIdentityPolicy operation. +// PutIdentityPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutIdentityPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutIdentityPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutIdentityPolicyRequest method. +// req, resp := client.PutIdentityPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *request.Request, output *PutIdentityPolicyOutput) { op := &request.Operation{ Name: opPutIdentityPolicy, @@ -840,12 +1450,14 @@ func (c *SES) PutIdentityPolicyRequest(input *PutIdentityPolicyInput) (req *requ } // Adds or updates a sending authorization policy for the specified identity -// (email address or domain). +// (an email address or a domain). // -// This API is for the identity owner only. If you have not verified the identity, -// this API will return an error. Sending authorization is a feature that enables -// an identity owner to authorize other senders to use its identities. For information -// about using sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// This API is for the identity owner only. If you have not verified the identity, +// this API will return an error. +// +// Sending authorization is a feature that enables an identity owner to authorize +// other senders to use its identities. 
For information about using sending +// authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). // // This action is throttled at one request per second. func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPolicyOutput, error) { @@ -856,7 +1468,28 @@ func (c *SES) PutIdentityPolicy(input *PutIdentityPolicyInput) (*PutIdentityPoli const opReorderReceiptRuleSet = "ReorderReceiptRuleSet" -// ReorderReceiptRuleSetRequest generates a request for the ReorderReceiptRuleSet operation. +// ReorderReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the ReorderReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ReorderReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReorderReceiptRuleSetRequest method. +// req, resp := client.ReorderReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (req *request.Request, output *ReorderReceiptRuleSetOutput) { op := &request.Operation{ Name: opReorderReceiptRuleSet, @@ -876,10 +1509,12 @@ func (c *SES) ReorderReceiptRuleSetRequest(input *ReorderReceiptRuleSetInput) (r // Reorders the receipt rules within a receipt rule set. // -// All of the rules in the rule set must be represented in this request. That -// is, this API will return an error if the reorder request doesn’t explicitly -// position all of the rules. For information about managing receipt rule sets, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// All of the rules in the rule set must be represented in this request. That +// is, this API will return an error if the reorder request doesn't explicitly +// position all of the rules. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // This action is throttled at one request per second. func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*ReorderReceiptRuleSetOutput, error) { @@ -890,7 +1525,28 @@ func (c *SES) ReorderReceiptRuleSet(input *ReorderReceiptRuleSetInput) (*Reorder const opSendBounce = "SendBounce" -// SendBounceRequest generates a request for the SendBounce operation. +// SendBounceRequest generates a "aws/request.Request" representing the +// client's request for the SendBounce operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendBounce method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendBounceRequest method. +// req, resp := client.SendBounceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, output *SendBounceOutput) { op := &request.Operation{ Name: opSendBounce, @@ -912,9 +1568,11 @@ func (c *SES) SendBounceRequest(input *SendBounceInput) (req *request.Request, o // through Amazon SES. You can only use this API on an email up to 24 hours // after you receive it. // -// You cannot use this API to send generic bounces for mail that was not received -// by Amazon SES. For information about receiving email through Amazon SES, -// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). +// You cannot use this API to send generic bounces for mail that was not received +// by Amazon SES. +// +// For information about receiving email through Amazon SES, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email.html). // // This action is throttled at one request per second. func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { @@ -925,7 +1583,28 @@ func (c *SES) SendBounce(input *SendBounceInput) (*SendBounceOutput, error) { const opSendEmail = "SendEmail" -// SendEmailRequest generates a request for the SendEmail operation. +// SendEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendEmail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendEmail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendEmailRequest method. +// req, resp := client.SendEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, output *SendEmailOutput) { op := &request.Operation{ Name: opSendEmail, @@ -948,21 +1627,25 @@ func (c *SES) SendEmailRequest(input *SendEmailInput) (req *request.Request, out // // There are several important points to know about SendEmail: // -// You can only send email from verified email addresses and domains; otherwise, +// You can only send email from verified email addresses and domains; otherwise, // you will get an "Email address not verified" error. 
If your account is still // in the Amazon SES sandbox, you must also verify every recipient email address // except for the recipients provided by the Amazon SES mailbox simulator. For // more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). -// The total size of the message cannot exceed 10 MB. This includes any attachments -// that are part of the message. Amazon SES has a limit on the total number -// of recipients per message. The combined number of To:, CC: and BCC: email -// addresses cannot exceed 50. If you need to send an email message to a larger -// audience, you can divide your recipient list into groups of 50 or fewer, -// and then call Amazon SES repeatedly to send the message to each group. For -// every message that you send, the total number of recipients (To:, CC: and -// BCC:) is counted against your sending quota - the maximum number of emails -// you can send in a 24-hour period. For information about your sending quota, -// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). +// +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. +// +// Amazon SES has a limit on the total number of recipients per message. +// The combined number of To:, CC: and BCC: email addresses cannot exceed 50. +// If you need to send an email message to a larger audience, you can divide +// your recipient list into groups of 50 or fewer, and then call Amazon SES +// repeatedly to send the message to each group. +// +// For every message that you send, the total number of recipients (To:, +// CC: and BCC:) is counted against your sending quota - the maximum number +// of emails you can send in a 24-hour period. For information about your sending +// quota, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { req, out := c.SendEmailRequest(input) err := req.Send() @@ -971,7 +1654,28 @@ func (c *SES) SendEmail(input *SendEmailInput) (*SendEmailOutput, error) { const opSendRawEmail = "SendRawEmail" -// SendRawEmailRequest generates a request for the SendRawEmail operation. +// SendRawEmailRequest generates a "aws/request.Request" representing the +// client's request for the SendRawEmail operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendRawEmail method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendRawEmailRequest method. 
+// req, resp := client.SendRawEmailRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Request, output *SendRawEmailOutput) { op := &request.Operation{ Name: opSendRawEmail, @@ -996,37 +1700,53 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // // There are several important points to know about SendRawEmail: // -// You can only send email from verified email addresses and domains; otherwise, +// You can only send email from verified email addresses and domains; otherwise, // you will get an "Email address not verified" error. If your account is still // in the Amazon SES sandbox, you must also verify every recipient email address // except for the recipients provided by the Amazon SES mailbox simulator. For // more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). -// The total size of the message cannot exceed 10 MB. This includes any attachments -// that are part of the message. Amazon SES has a limit on the total number -// of recipients per message. The combined number of To:, CC: and BCC: email -// addresses cannot exceed 50. If you need to send an email message to a larger -// audience, you can divide your recipient list into groups of 50 or fewer, -// and then call Amazon SES repeatedly to send the message to each group. The -// To:, CC:, and BCC: headers in the raw message can contain a group list. Note -// that each recipient in a group list counts towards the 50-recipient limit. -// For every message that you send, the total number of recipients (To:, CC: -// and BCC:) is counted against your sending quota - the maximum number of emails -// you can send in a 24-hour period. For information about your sending quota, -// go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). -// If you are using sending authorization to send on behalf of another user, +// +// The total size of the message cannot exceed 10 MB. This includes any attachments +// that are part of the message. +// +// Amazon SES has a limit on the total number of recipients per message. +// The combined number of To:, CC: and BCC: email addresses cannot exceed 50. +// If you need to send an email message to a larger audience, you can divide +// your recipient list into groups of 50 or fewer, and then call Amazon SES +// repeatedly to send the message to each group. +// +// The To:, CC:, and BCC: headers in the raw message can contain a group +// list. Note that each recipient in a group list counts towards the 50-recipient +// limit. +// +// For every message that you send, the total number of recipients (To:, +// CC: and BCC:) is counted against your sending quota - the maximum number +// of emails you can send in a 24-hour period. For information about your sending +// quota, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/manage-sending-limits.html). 
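The 50-recipient ceiling described above is usually handled by chunking the recipient list and calling SendEmail once per chunk, with every recipient still counting against the 24-hour sending quota. A rough sketch assuming a configured client; the helper name and the message field names are assumptions for illustration:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// sendToAll sends the same plain-text message in batches of at most 50 recipients.
func sendToAll(svc *ses.SES, source string, recipients []string, subject, body string) error {
	const maxRecipients = 50 // combined To:, CC:, and BCC: limit per message
	for start := 0; start < len(recipients); start += maxRecipients {
		end := start + maxRecipients
		if end > len(recipients) {
			end = len(recipients)
		}
		_, err := svc.SendEmail(&ses.SendEmailInput{
			Source:      aws.String(source),
			Destination: &ses.Destination{ToAddresses: aws.StringSlice(recipients[start:end])},
			Message: &ses.Message{
				Subject: &ses.Content{Data: aws.String(subject)},
				Body:    &ses.Body{Text: &ses.Content{Data: aws.String(body)}},
			},
		})
		if err != nil {
			return err
		}
	}
	return nil
}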
+// +// If you are using sending authorization to send on behalf of another user, // SendRawEmail enables you to specify the cross-account identity for the email's // "Source," "From," and "Return-Path" parameters in one of two ways: you can // pass optional parameters SourceArn, FromArn, and/or ReturnPathArn to the // API, or you can include the following X-headers in the header of your raw -// email: X-SES-SOURCE-ARN X-SES-FROM-ARN X-SES-RETURN-PATH-ARN Do not include -// these X-headers in the DKIM signature, because they are removed by Amazon -// SES before sending the email. For the most common sending authorization use -// case, we recommend that you specify the SourceIdentityArn and do not specify -// either the FromIdentityArn or ReturnPathIdentityArn. (The same note applies -// to the corresponding X-headers.) If you only specify the SourceIdentityArn, -// Amazon SES will simply set the "From" address and the "Return Path" address -// to the identity specified in SourceIdentityArn. For more information about -// sending authorization, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). +// email: +// +// X-SES-SOURCE-ARN +// +// X-SES-FROM-ARN +// +// X-SES-RETURN-PATH-ARN +// +// Do not include these X-headers in the DKIM signature, because they are +// removed by Amazon SES before sending the email. +// +// For the most common sending authorization use case, we recommend that you +// specify the SourceIdentityArn and do not specify either the FromIdentityArn +// or ReturnPathIdentityArn. (The same note applies to the corresponding X-headers.) +// If you only specify the SourceIdentityArn, Amazon SES will simply set the +// "From" address and the "Return Path" address to the identity specified in +// SourceIdentityArn. For more information about sending authorization, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error) { req, out := c.SendRawEmailRequest(input) err := req.Send() @@ -1035,7 +1755,28 @@ func (c *SES) SendRawEmail(input *SendRawEmailInput) (*SendRawEmailOutput, error const opSetActiveReceiptRuleSet = "SetActiveReceiptRuleSet" -// SetActiveReceiptRuleSetRequest generates a request for the SetActiveReceiptRuleSet operation. +// SetActiveReceiptRuleSetRequest generates a "aws/request.Request" representing the +// client's request for the SetActiveReceiptRuleSet operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetActiveReceiptRuleSet method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetActiveReceiptRuleSetRequest method. 
+// req, resp := client.SetActiveReceiptRuleSetRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput) (req *request.Request, output *SetActiveReceiptRuleSetOutput) { op := &request.Operation{ Name: opSetActiveReceiptRuleSet, @@ -1055,9 +1796,11 @@ func (c *SES) SetActiveReceiptRuleSetRequest(input *SetActiveReceiptRuleSetInput // Sets the specified receipt rule set as the active receipt rule set. // -// To disable your email-receiving through Amazon SES completely, you can call -// this API with RuleSetName set to null. For information about managing receipt -// rule sets, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). +// To disable your email-receiving through Amazon SES completely, you can +// call this API with RuleSetName set to null. +// +// For information about managing receipt rule sets, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html). // // This action is throttled at one request per second. func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*SetActiveReceiptRuleSetOutput, error) { @@ -1068,7 +1811,28 @@ func (c *SES) SetActiveReceiptRuleSet(input *SetActiveReceiptRuleSetInput) (*Set const opSetIdentityDkimEnabled = "SetIdentityDkimEnabled" -// SetIdentityDkimEnabledRequest generates a request for the SetIdentityDkimEnabled operation. +// SetIdentityDkimEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityDkimEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityDkimEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityDkimEnabledRequest method. +// req, resp := client.SetIdentityDkimEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) (req *request.Request, output *SetIdentityDkimEnabledOutput) { op := &request.Operation{ Name: opSetIdentityDkimEnabled, @@ -1088,13 +1852,16 @@ func (c *SES) SetIdentityDkimEnabledRequest(input *SetIdentityDkimEnabledInput) // Enables or disables Easy DKIM signing of email sent from an identity: // -// If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), +// If Easy DKIM signing is enabled for a domain name identity (e.g., example.com), // then Amazon SES will DKIM-sign all email sent by addresses under that domain -// name (e.g., user@example.com). If Easy DKIM signing is enabled for an email -// address, then Amazon SES will DKIM-sign all email sent by that email address. 
-// For email addresses (e.g., user@example.com), you can only enable Easy DKIM -// signing if the corresponding domain (e.g., example.com) has been set up for -// Easy DKIM using the AWS Console or the VerifyDomainDkim action. +// name (e.g., user@example.com). +// +// If Easy DKIM signing is enabled for an email address, then Amazon SES +// will DKIM-sign all email sent by that email address. +// +// For email addresses (e.g., user@example.com), you can only enable Easy +// DKIM signing if the corresponding domain (e.g., example.com) has been set +// up for Easy DKIM using the AWS Console or the VerifyDomainDkim action. // // This action is throttled at one request per second. // @@ -1108,7 +1875,28 @@ func (c *SES) SetIdentityDkimEnabled(input *SetIdentityDkimEnabledInput) (*SetId const opSetIdentityFeedbackForwardingEnabled = "SetIdentityFeedbackForwardingEnabled" -// SetIdentityFeedbackForwardingEnabledRequest generates a request for the SetIdentityFeedbackForwardingEnabled operation. +// SetIdentityFeedbackForwardingEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityFeedbackForwardingEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityFeedbackForwardingEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityFeedbackForwardingEnabledRequest method. +// req, resp := client.SetIdentityFeedbackForwardingEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeedbackForwardingEnabledInput) (req *request.Request, output *SetIdentityFeedbackForwardingEnabledOutput) { op := &request.Operation{ Name: opSetIdentityFeedbackForwardingEnabled, @@ -1126,14 +1914,15 @@ func (c *SES) SetIdentityFeedbackForwardingEnabledRequest(input *SetIdentityFeed return } -// Given an identity (email address or domain), enables or disables whether +// Given an identity (an email address or a domain), enables or disables whether // Amazon SES forwards bounce and complaint notifications as email. Feedback // forwarding can only be disabled when Amazon Simple Notification Service (Amazon // SNS) topics are specified for both bounces and complaints. // -// Feedback forwarding does not apply to delivery notifications. Delivery notifications -// are only available through Amazon SNS. This action is throttled at one request -// per second. +// Feedback forwarding does not apply to delivery notifications. Delivery +// notifications are only available through Amazon SNS. +// +// This action is throttled at one request per second. // // For more information about using notifications with Amazon SES, see the // Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). 
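As the SetIdentityFeedbackForwardingEnabled comment above notes, forwarding can only be disabled once Amazon SNS topics are set for both bounces and complaints. A sketch of that ordering, assuming a configured client and an existing topic ARN; the input field names are assumptions based on the generated types:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// disableForwarding routes bounce and complaint feedback to an SNS topic,
// then turns off forwarding of that feedback as email.
func disableForwarding(svc *ses.SES, identity, topicARN string) error {
	for _, nType := range []string{"Bounce", "Complaint"} {
		_, err := svc.SetIdentityNotificationTopic(&ses.SetIdentityNotificationTopicInput{
			Identity:         aws.String(identity),
			NotificationType: aws.String(nType),
			SnsTopic:         aws.String(topicARN),
		})
		if err != nil {
			return err
		}
	}
	_, err := svc.SetIdentityFeedbackForwardingEnabled(&ses.SetIdentityFeedbackForwardingEnabledInput{
		Identity:          aws.String(identity),
		ForwardingEnabled: aws.Bool(false),
	})
	return err
}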
@@ -1143,9 +1932,141 @@ func (c *SES) SetIdentityFeedbackForwardingEnabled(input *SetIdentityFeedbackFor return out, err } +const opSetIdentityHeadersInNotificationsEnabled = "SetIdentityHeadersInNotificationsEnabled" + +// SetIdentityHeadersInNotificationsEnabledRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityHeadersInNotificationsEnabled operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityHeadersInNotificationsEnabled method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityHeadersInNotificationsEnabledRequest method. +// req, resp := client.SetIdentityHeadersInNotificationsEnabledRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityHeadersInNotificationsEnabledRequest(input *SetIdentityHeadersInNotificationsEnabledInput) (req *request.Request, output *SetIdentityHeadersInNotificationsEnabledOutput) { + op := &request.Operation{ + Name: opSetIdentityHeadersInNotificationsEnabled, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityHeadersInNotificationsEnabledInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityHeadersInNotificationsEnabledOutput{} + req.Data = output + return +} + +// Given an identity (an email address or a domain), sets whether Amazon SES +// includes the original email headers in the Amazon Simple Notification Service +// (Amazon SNS) notifications of a specified type. +// +// This action is throttled at one request per second. +// +// For more information about using notifications with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). +func (c *SES) SetIdentityHeadersInNotificationsEnabled(input *SetIdentityHeadersInNotificationsEnabledInput) (*SetIdentityHeadersInNotificationsEnabledOutput, error) { + req, out := c.SetIdentityHeadersInNotificationsEnabledRequest(input) + err := req.Send() + return out, err +} + +const opSetIdentityMailFromDomain = "SetIdentityMailFromDomain" + +// SetIdentityMailFromDomainRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityMailFromDomain operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityMailFromDomain method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityMailFromDomainRequest method. 
+// req, resp := client.SetIdentityMailFromDomainRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SES) SetIdentityMailFromDomainRequest(input *SetIdentityMailFromDomainInput) (req *request.Request, output *SetIdentityMailFromDomainOutput) { + op := &request.Operation{ + Name: opSetIdentityMailFromDomain, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetIdentityMailFromDomainInput{} + } + + req = c.newRequest(op, input, output) + output = &SetIdentityMailFromDomainOutput{} + req.Data = output + return +} + +// Enables or disables the custom MAIL FROM domain setup for a verified identity +// (an email address or a domain). +// +// To send emails using the specified MAIL FROM domain, you must add an MX +// record to your MAIL FROM domain's DNS settings. If you want your emails to +// pass Sender Policy Framework (SPF) checks, you must also add or update an +// SPF record. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from-set.html). +// +// This action is throttled at one request per second. +func (c *SES) SetIdentityMailFromDomain(input *SetIdentityMailFromDomainInput) (*SetIdentityMailFromDomainOutput, error) { + req, out := c.SetIdentityMailFromDomainRequest(input) + err := req.Send() + return out, err +} + const opSetIdentityNotificationTopic = "SetIdentityNotificationTopic" -// SetIdentityNotificationTopicRequest generates a request for the SetIdentityNotificationTopic operation. +// SetIdentityNotificationTopicRequest generates a "aws/request.Request" representing the +// client's request for the SetIdentityNotificationTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetIdentityNotificationTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetIdentityNotificationTopicRequest method. +// req, resp := client.SetIdentityNotificationTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotificationTopicInput) (req *request.Request, output *SetIdentityNotificationTopicOutput) { op := &request.Operation{ Name: opSetIdentityNotificationTopic, @@ -1163,12 +2084,14 @@ func (c *SES) SetIdentityNotificationTopicRequest(input *SetIdentityNotification return } -// Given an identity (email address or domain), sets the Amazon Simple Notification -// Service (Amazon SNS) topic to which Amazon SES will publish bounce, complaint, -// and/or delivery notifications for emails sent with that identity as the Source. +// Given an identity (an email address or a domain), sets the Amazon Simple +// Notification Service (Amazon SNS) topic to which Amazon SES will publish +// bounce, complaint, and/or delivery notifications for emails sent with that +// identity as the Source. 
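Turning on the custom MAIL FROM domain described by the new SetIdentityMailFromDomain operation is a single call; the MX and SPF records still have to be published separately in DNS. A sketch with placeholder domains; the field names and the MX-failure behavior value are assumptions:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ses"
)

// enableMailFrom points a verified identity at a custom MAIL FROM domain.
func enableMailFrom(svc *ses.SES, identity, mailFromDomain string) error {
	_, err := svc.SetIdentityMailFromDomain(&ses.SetIdentityMailFromDomainInput{
		Identity:       aws.String(identity),       // e.g. "example.com"
		MailFromDomain: aws.String(mailFromDomain), // e.g. "bounce.example.com"
		// Fall back to amazonses.com as the MAIL FROM domain if the MX record is missing.
		BehaviorOnMXFailure: aws.String("UseDefaultValue"),
	})
	return err
}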
// -// Unless feedback forwarding is enabled, you must specify Amazon SNS topics +// Unless feedback forwarding is enabled, you must specify Amazon SNS topics // for bounce and complaint notifications. For more information, see SetIdentityFeedbackForwardingEnabled. +// // This action is throttled at one request per second. // // For more information about feedback notification, see the Amazon SES Developer @@ -1181,7 +2104,28 @@ func (c *SES) SetIdentityNotificationTopic(input *SetIdentityNotificationTopicIn const opSetReceiptRulePosition = "SetReceiptRulePosition" -// SetReceiptRulePositionRequest generates a request for the SetReceiptRulePosition operation. +// SetReceiptRulePositionRequest generates a "aws/request.Request" representing the +// client's request for the SetReceiptRulePosition operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetReceiptRulePosition method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetReceiptRulePositionRequest method. +// req, resp := client.SetReceiptRulePositionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) SetReceiptRulePositionRequest(input *SetReceiptRulePositionInput) (req *request.Request, output *SetReceiptRulePositionOutput) { op := &request.Operation{ Name: opSetReceiptRulePosition, @@ -1213,7 +2157,28 @@ func (c *SES) SetReceiptRulePosition(input *SetReceiptRulePositionInput) (*SetRe const opUpdateReceiptRule = "UpdateReceiptRule" -// UpdateReceiptRuleRequest generates a request for the UpdateReceiptRule operation. +// UpdateReceiptRuleRequest generates a "aws/request.Request" representing the +// client's request for the UpdateReceiptRule operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UpdateReceiptRule method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UpdateReceiptRuleRequest method. +// req, resp := client.UpdateReceiptRuleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) UpdateReceiptRuleRequest(input *UpdateReceiptRuleInput) (req *request.Request, output *UpdateReceiptRuleOutput) { op := &request.Operation{ Name: opUpdateReceiptRule, @@ -1245,7 +2210,28 @@ func (c *SES) UpdateReceiptRule(input *UpdateReceiptRuleInput) (*UpdateReceiptRu const opVerifyDomainDkim = "VerifyDomainDkim" -// VerifyDomainDkimRequest generates a request for the VerifyDomainDkim operation. 
+// VerifyDomainDkimRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainDkim operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyDomainDkim method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyDomainDkimRequest method. +// req, resp := client.VerifyDomainDkimRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) VerifyDomainDkimRequest(input *VerifyDomainDkimInput) (req *request.Request, output *VerifyDomainDkimOutput) { op := &request.Operation{ Name: opVerifyDomainDkim, @@ -1286,7 +2272,28 @@ func (c *SES) VerifyDomainDkim(input *VerifyDomainDkimInput) (*VerifyDomainDkimO const opVerifyDomainIdentity = "VerifyDomainIdentity" -// VerifyDomainIdentityRequest generates a request for the VerifyDomainIdentity operation. +// VerifyDomainIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyDomainIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyDomainIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyDomainIdentityRequest method. +// req, resp := client.VerifyDomainIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) VerifyDomainIdentityRequest(input *VerifyDomainIdentityInput) (req *request.Request, output *VerifyDomainIdentityOutput) { op := &request.Operation{ Name: opVerifyDomainIdentity, @@ -1315,7 +2322,28 @@ func (c *SES) VerifyDomainIdentity(input *VerifyDomainIdentityInput) (*VerifyDom const opVerifyEmailAddress = "VerifyEmailAddress" -// VerifyEmailAddressRequest generates a request for the VerifyEmailAddress operation. +// VerifyEmailAddressRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailAddress operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyEmailAddress method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the VerifyEmailAddressRequest method. +// req, resp := client.VerifyEmailAddressRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *request.Request, output *VerifyEmailAddressOutput) { op := &request.Operation{ Name: opVerifyEmailAddress, @@ -1338,9 +2366,10 @@ func (c *SES) VerifyEmailAddressRequest(input *VerifyEmailAddressInput) (req *re // Verifies an email address. This action causes a confirmation email message // to be sent to the specified address. // -// The VerifyEmailAddress action is deprecated as of the May 15, 2012 release +// The VerifyEmailAddress action is deprecated as of the May 15, 2012 release // of Domain Verification. The VerifyEmailIdentity action is now preferred. -// This action is throttled at one request per second. +// +// This action is throttled at one request per second. func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAddressOutput, error) { req, out := c.VerifyEmailAddressRequest(input) err := req.Send() @@ -1349,7 +2378,28 @@ func (c *SES) VerifyEmailAddress(input *VerifyEmailAddressInput) (*VerifyEmailAd const opVerifyEmailIdentity = "VerifyEmailIdentity" -// VerifyEmailIdentityRequest generates a request for the VerifyEmailIdentity operation. +// VerifyEmailIdentityRequest generates a "aws/request.Request" representing the +// client's request for the VerifyEmailIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the VerifyEmailIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the VerifyEmailIdentityRequest method. +// req, resp := client.VerifyEmailIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SES) VerifyEmailIdentityRequest(input *VerifyEmailIdentityInput) (req *request.Request, output *VerifyEmailIdentityOutput) { op := &request.Operation{ Name: opVerifyEmailIdentity, @@ -1404,6 +2454,22 @@ func (s AddHeaderAction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddHeaderAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddHeaderAction"} + if s.HeaderName == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderName")) + } + if s.HeaderValue == nil { + invalidParams.Add(request.NewErrParamRequired("HeaderValue")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Represents the body of the message. You can specify text, HTML, or both. // If you use both, then the message should display correctly in the widest // variety of email clients. @@ -1430,6 +2496,26 @@ func (s Body) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
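The request/Send lifecycle described in the doc comments above can be exercised end to end. The following is an illustrative sketch, not part of this patch, using one of the Verify operations; the session setup, the example address, and the EmailAddress field on VerifyEmailIdentityInput are assumptions (that input struct is defined further down in this file and is not shown in this hunk).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	client := ses.New(session.New())

	// Placeholder address; EmailAddress on VerifyEmailIdentityInput is assumed
	// from the operation's purpose (the struct is defined later in this file).
	params := &ses.VerifyEmailIdentityInput{
		EmailAddress: aws.String("user@example.com"),
	}

	// Build the request without sending it, then attach custom logic to its
	// lifecycle; nothing goes on the wire until Send is called.
	req, resp := client.VerifyEmailIdentityRequest(params)
	req.Handlers.Send.PushBack(func(r *request.Request) {
		fmt.Println("sending", r.Operation.Name)
	})

	if err := req.Send(); err == nil {
		fmt.Println(resp) // resp is now filled
	}
}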
+func (s *Body) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Body"} + if s.Html != nil { + if err := s.Html.Validate(); err != nil { + invalidParams.AddNested("Html", err.(request.ErrInvalidParams)) + } + } + if s.Text != nil { + if err := s.Text.Validate(); err != nil { + invalidParams.AddNested("Text", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // When included in a receipt rule, this action rejects the received email by // returning a bounce response to the sender and, optionally, publishes a notification // to Amazon Simple Notification Service (Amazon SNS). @@ -1469,6 +2555,25 @@ func (s BounceAction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BounceAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BounceAction"} + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Sender == nil { + invalidParams.Add(request.NewErrParamRequired("Sender")) + } + if s.SmtpReplyCode == nil { + invalidParams.Add(request.NewErrParamRequired("SmtpReplyCode")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Recipient-related information to include in the Delivery Status Notification // (DSN) when an email that Amazon SES receives on your behalf bounces. // @@ -1505,6 +2610,27 @@ func (s BouncedRecipientInfo) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *BouncedRecipientInfo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BouncedRecipientInfo"} + if s.Recipient == nil { + invalidParams.Add(request.NewErrParamRequired("Recipient")) + } + if s.RecipientDsnFields != nil { + if err := s.RecipientDsnFields.Validate(); err != nil { + invalidParams.AddNested("RecipientDsnFields", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to create a receipt rule set by cloning an existing +// one. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CloneReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -1513,9 +2639,12 @@ type CloneReceiptRuleSetInput struct { // The name of the rule set to create. The name must: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-). Start and end with a letter or number. Contain less than - // 64 characters. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. RuleSetName *string `type:"string" required:"true"` } @@ -1529,6 +2658,23 @@ func (s CloneReceiptRuleSetInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
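As a small illustration (not part of the patch) of how these generated Validate methods report nested problems: Body.Validate delegates to Content.Validate, so an empty Text part surfaces the missing Data parameter under the "Text" context via AddNested.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	// Text is present but its required Data field is left nil, so Validate
	// reports the missing parameter nested under the "Text" context.
	body := &ses.Body{
		Text: &ses.Content{},
	}
	if err := body.Validate(); err != nil {
		fmt.Println(err)
	}
}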
+func (s *CloneReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CloneReceiptRuleSetInput"} + if s.OriginalRuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalRuleSetName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type CloneReceiptRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -1568,6 +2714,22 @@ func (s Content) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Content) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Content"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to create a new IP address filter. You use IP address +// filters when you receive email with Amazon SES. For more information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptFilterInput struct { _ struct{} `type:"structure"` @@ -1586,6 +2748,25 @@ func (s CreateReceiptFilterInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptFilterInput"} + if s.Filter == nil { + invalidParams.Add(request.NewErrParamRequired("Filter")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type CreateReceiptFilterOutput struct { _ struct{} `type:"structure"` } @@ -1600,6 +2781,9 @@ func (s CreateReceiptFilterOutput) GoString() string { return s.String() } +// Represents a request to create a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -1626,6 +2810,28 @@ func (s CreateReceiptRuleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type CreateReceiptRuleOutput struct { _ struct{} `type:"structure"` } @@ -1640,14 +2846,20 @@ func (s CreateReceiptRuleOutput) GoString() string { return s.String() } +// Represents a request to create an empty receipt rule set. You use receipt +// rule sets to receive email with Amazon SES. 
For more information, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type CreateReceiptRuleSetInput struct { _ struct{} `type:"structure"` // The name of the rule set to create. The name must: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-). Start and end with a letter or number. Contain less than - // 64 characters. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. RuleSetName *string `type:"string" required:"true"` } @@ -1661,6 +2873,20 @@ func (s CreateReceiptRuleSetInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type CreateReceiptRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -1675,8 +2901,8 @@ func (s CreateReceiptRuleSetOutput) GoString() string { return s.String() } -// Represents a request instructing the service to delete an identity from the -// list of identities for the AWS Account. +// Represents a request to delete one of your Amazon SES identities (an email +// address or domain). type DeleteIdentityInput struct { _ struct{} `type:"structure"` @@ -1694,8 +2920,20 @@ func (s DeleteIdentityInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type DeleteIdentityOutput struct { _ struct{} `type:"structure"` } @@ -1710,10 +2948,10 @@ func (s DeleteIdentityOutput) GoString() string { return s.String() } -// Represents a request instructing the service to delete an authorization policy -// applying to an identity. -// -// This request succeeds regardless of whether the specified policy exists. +// Represents a request to delete a sending authorization policy for an identity. +// Sending authorization is an Amazon SES feature that enables you to authorize +// other senders to use your identities. For information, see the Amazon SES +// Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type DeleteIdentityPolicyInput struct { _ struct{} `type:"structure"` @@ -1738,8 +2976,26 @@ func (s DeleteIdentityPolicyInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type DeleteIdentityPolicyOutput struct { _ struct{} `type:"structure"` } @@ -1754,6 +3010,9 @@ func (s DeleteIdentityPolicyOutput) GoString() string { return s.String() } +// Represents a request to delete an IP address filter. You use IP address filters +// when you receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DeleteReceiptFilterInput struct { _ struct{} `type:"structure"` @@ -1771,6 +3030,20 @@ func (s DeleteReceiptFilterInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptFilterInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptFilterInput"} + if s.FilterName == nil { + invalidParams.Add(request.NewErrParamRequired("FilterName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type DeleteReceiptFilterOutput struct { _ struct{} `type:"structure"` } @@ -1785,6 +3058,9 @@ func (s DeleteReceiptFilterOutput) GoString() string { return s.String() } +// Represents a request to delete a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DeleteReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -1805,6 +3081,23 @@ func (s DeleteReceiptRuleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type DeleteReceiptRuleOutput struct { _ struct{} `type:"structure"` } @@ -1819,6 +3112,9 @@ func (s DeleteReceiptRuleOutput) GoString() string { return s.String() } +// Represents a request to delete a receipt rule set and all of the receipt +// rules it contains. You use receipt rule sets to receive email with Amazon +// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DeleteReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -1836,6 +3132,20 @@ func (s DeleteReceiptRuleSetInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type DeleteReceiptRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -1850,8 +3160,8 @@ func (s DeleteReceiptRuleSetOutput) GoString() string { return s.String() } -// Represents a request instructing the service to delete an address from the -// list of verified email addresses. +// Represents a request to delete an email address from the list of email addresses +// you have attempted to verify under your AWS account. type DeleteVerifiedEmailAddressInput struct { _ struct{} `type:"structure"` @@ -1869,6 +3179,19 @@ func (s DeleteVerifiedEmailAddressInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteVerifiedEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteVerifiedEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type DeleteVerifiedEmailAddressOutput struct { _ struct{} `type:"structure"` } @@ -1883,6 +3206,10 @@ func (s DeleteVerifiedEmailAddressOutput) GoString() string { return s.String() } +// Represents a request to return the metadata and receipt rules for the receipt +// rule set that is currently active. You use receipt rule sets to receive email +// with Amazon SES. For more information, see the Amazon SES Developer Guide +// (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeActiveReceiptRuleSetInput struct { _ struct{} `type:"structure"` } @@ -1897,6 +3224,8 @@ func (s DescribeActiveReceiptRuleSetInput) GoString() string { return s.String() } +// Represents the metadata and receipt rules for the receipt rule set that is +// currently active. type DescribeActiveReceiptRuleSetOutput struct { _ struct{} `type:"structure"` @@ -1918,6 +3247,9 @@ func (s DescribeActiveReceiptRuleSetOutput) GoString() string { return s.String() } +// Represents a request to return the details of a receipt rule. You use receipt +// rules to receive email with Amazon SES. For more information, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -1938,6 +3270,23 @@ func (s DescribeReceiptRuleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the details of a receipt rule. 
type DescribeReceiptRuleOutput struct { _ struct{} `type:"structure"` @@ -1957,6 +3306,9 @@ func (s DescribeReceiptRuleOutput) GoString() string { return s.String() } +// Represents a request to return the details of a receipt rule set. You use +// receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type DescribeReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -1974,6 +3326,20 @@ func (s DescribeReceiptRuleSetInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeReceiptRuleSetInput"} + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the details of the specified receipt rule set. type DescribeReceiptRuleSetOutput struct { _ struct{} `type:"structure"` @@ -2052,12 +3418,27 @@ func (s ExtensionField) GoString() string { return s.String() } -// Given a list of verified identities, describes their DKIM attributes. The -// DKIM attributes of an email address identity includes whether DKIM signing -// is individually enabled or disabled for that address. The DKIM attributes -// of a domain name identity includes whether DKIM signing is enabled, as well -// as the DNS records (tokens) that must remain published in the domain name's -// DNS. +// Validate inspects the fields of the type to determine if they are valid. +func (s *ExtensionField) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExtensionField"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request for the status of Amazon SES Easy DKIM signing for an +// identity. For domain identities, this request also returns the DKIM tokens +// that are required for Easy DKIM signing, and whether Amazon SES successfully +// verified that these tokens were published. For more information about Easy +// DKIM, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). type GetIdentityDkimAttributesInput struct { _ struct{} `type:"structure"` @@ -2076,7 +3457,23 @@ func (s GetIdentityDkimAttributesInput) GoString() string { return s.String() } -// Represents a list of all the DKIM attributes for the specified identity. +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityDkimAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityDkimAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the status of Amazon SES Easy DKIM signing for an identity. For +// domain identities, this response also contains the DKIM tokens that are required +// for Easy DKIM signing, and whether Amazon SES successfully verified that +// these tokens were published. 
type GetIdentityDkimAttributesOutput struct { _ struct{} `type:"structure"` @@ -2094,6 +3491,60 @@ func (s GetIdentityDkimAttributesOutput) GoString() string { return s.String() } +// Represents a request to return the Amazon SES custom MAIL FROM attributes +// for a list of identities. For information about using a custom MAIL FROM +// domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +type GetIdentityMailFromDomainAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of one or more identities. + Identities []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityMailFromDomainAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityMailFromDomainAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the custom MAIL FROM attributes for a list of identities. +type GetIdentityMailFromDomainAttributesOutput struct { + _ struct{} `type:"structure"` + + // A map of identities to custom MAIL FROM attributes. + MailFromDomainAttributes map[string]*IdentityMailFromDomainAttributes `type:"map" required:"true"` +} + +// String returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetIdentityMailFromDomainAttributesOutput) GoString() string { + return s.String() +} + +// Represents a request to return the notification attributes for a list of +// identities you verified with Amazon SES. For information about Amazon SES +// notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications.html). type GetIdentityNotificationAttributesInput struct { _ struct{} `type:"structure"` @@ -2113,10 +3564,20 @@ func (s GetIdentityNotificationAttributesInput) GoString() string { return s.String() } -// Describes whether an identity has Amazon Simple Notification Service (Amazon -// SNS) topics set for bounce, complaint, and/or delivery notifications, and -// specifies whether feedback forwarding is enabled for bounce and complaint -// notifications. +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityNotificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityNotificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the notification attributes for a list of identities. type GetIdentityNotificationAttributesOutput struct { _ struct{} `type:"structure"` @@ -2134,8 +3595,10 @@ func (s GetIdentityNotificationAttributesOutput) GoString() string { return s.String() } -// Represents a request instructing the service to retrieve the text of a list -// of authorization policies applying to an identity. 
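A brief sketch, not part of this patch, of how the new custom MAIL FROM types added here might be used: the identity is a placeholder, and the GetIdentityMailFromDomainAttributes client method is an assumption inferred from the paired input/output types (the method itself is not shown in this hunk).

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New())

	// "example.com" is a placeholder identity.
	params := &ses.GetIdentityMailFromDomainAttributesInput{
		Identities: []*string{aws.String("example.com")},
	}
	if err := params.Validate(); err != nil {
		fmt.Println(err)
		return
	}

	// GetIdentityMailFromDomainAttributes is assumed to be the generated client
	// method paired with these input/output types; it is not shown in this hunk.
	resp, err := svc.GetIdentityMailFromDomainAttributes(params)
	if err != nil {
		fmt.Println(err)
		return
	}
	for identity, attrs := range resp.MailFromDomainAttributes {
		fmt.Println(identity,
			aws.StringValue(attrs.MailFromDomain),
			aws.StringValue(attrs.MailFromDomainStatus))
	}
}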
+// Represents a request to return the requested sending authorization policies +// for an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type GetIdentityPoliciesInput struct { _ struct{} `type:"structure"` @@ -2162,8 +3625,23 @@ func (s GetIdentityPoliciesInput) GoString() string { return s.String() } -// Represents a map of policy names to policies returned from a successful GetIdentityPolicies -// request. +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityPoliciesInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.PolicyNames == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyNames")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents the requested sending authorization policies. type GetIdentityPoliciesOutput struct { _ struct{} `type:"structure"` @@ -2181,8 +3659,10 @@ func (s GetIdentityPoliciesOutput) GoString() string { return s.String() } -// Represents a request instructing the service to provide the verification -// attributes for a list of identities. +// Represents a request to return the Amazon SES verification status of a list +// of identities. For domain identities, this request also returns the verification +// token. For information about verifying identities with Amazon SES, see the +// Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-addresses-and-domains.html). type GetIdentityVerificationAttributesInput struct { _ struct{} `type:"structure"` @@ -2200,7 +3680,21 @@ func (s GetIdentityVerificationAttributesInput) GoString() string { return s.String() } -// Represents the verification attributes for a list of identities. +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetIdentityVerificationAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetIdentityVerificationAttributesInput"} + if s.Identities == nil { + invalidParams.Add(request.NewErrParamRequired("Identities")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The Amazon SES verification status of a list of identities. For domain identities, +// this response also contains the verification token. type GetIdentityVerificationAttributesOutput struct { _ struct{} `type:"structure"` @@ -2232,8 +3726,8 @@ func (s GetSendQuotaInput) GoString() string { return s.String() } -// Represents the user's current activity limits returned from a successful -// GetSendQuota request. +// Represents your Amazon SES daily sending quota, maximum send rate, and the +// number of emails you have sent in the last 24 hours. type GetSendQuotaOutput struct { _ struct{} `type:"structure"` @@ -2244,8 +3738,8 @@ type GetSendQuotaOutput struct { // The maximum number of emails that Amazon SES can accept from the user's account // per second. // - // The rate at which Amazon SES accepts the user's messages might be less than - // the maximum send rate. + // The rate at which Amazon SES accepts the user's messages might be less + // than the maximum send rate. 
MaxSendRate *float64 `type:"double"` // The number of emails sent during the previous 24 hours. @@ -2276,9 +3770,8 @@ func (s GetSendStatisticsInput) GoString() string { return s.String() } -// Represents a list of SendDataPoint items returned from a successful GetSendStatistics -// request. This list contains aggregated data from the previous two weeks of -// sending activity. +// Represents a list of data points. This list contains aggregated data from +// the previous two weeks of your sending activity with Amazon SES. type GetSendStatisticsOutput struct { _ struct{} `type:"structure"` @@ -2331,6 +3824,43 @@ func (s IdentityDkimAttributes) GoString() string { return s.String() } +// Represents the custom MAIL FROM domain attributes of a verified identity +// (email address or domain). +type IdentityMailFromDomainAttributes struct { + _ struct{} `type:"structure"` + + // The action that Amazon SES takes if it cannot successfully read the required + // MX record when you send an email. A value of UseDefaultValue indicates that + // if Amazon SES cannot read the required MX record, it uses amazonses.com (or + // a subdomain of that) as the MAIL FROM domain. A value of RejectMessage indicates + // that if Amazon SES cannot read the required MX record, Amazon SES returns + // a MailFromDomainNotVerified error and does not send the email. + // + // The custom MAIL FROM setup states that result in this behavior are Pending, + // Failed, and TemporaryFailure. + BehaviorOnMXFailure *string `type:"string" required:"true" enum:"BehaviorOnMXFailure"` + + // The custom MAIL FROM domain that the identity is configured to use. + MailFromDomain *string `type:"string" required:"true"` + + // The state that indicates whether Amazon SES has successfully read the MX + // record required for custom MAIL FROM domain setup. If the state is Success, + // Amazon SES uses the specified custom MAIL FROM domain when the verified identity + // sends an email. All other states indicate that Amazon SES takes the action + // described by BehaviorOnMXFailure. + MailFromDomainStatus *string `type:"string" required:"true" enum:"CustomMailFromStatus"` +} + +// String returns the string representation +func (s IdentityMailFromDomainAttributes) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IdentityMailFromDomainAttributes) GoString() string { + return s.String() +} + // Represents the notification attributes of an identity, including whether // an identity has Amazon Simple Notification Service (Amazon SNS) topics set // for bounce, complaint, and/or delivery notifications, and whether feedback @@ -2355,6 +3885,24 @@ type IdentityNotificationAttributes struct { // notifications as email, while false indicates that bounce and complaint notifications // will be published only to the specified bounce and complaint Amazon SNS topics. ForwardingEnabled *bool `type:"boolean" required:"true"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Bounce. A value of true specifies that Amazon SES + // will include headers in bounce notifications, and a value of false specifies + // that Amazon SES will not include headers in bounce notifications. + HeadersInBounceNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Complaint. 
A value of true specifies that Amazon + // SES will include headers in complaint notifications, and a value of false + // specifies that Amazon SES will not include headers in complaint notifications. + HeadersInComplaintNotificationsEnabled *bool `type:"boolean"` + + // Describes whether Amazon SES includes the original email headers in Amazon + // SNS notifications of type Delivery. A value of true specifies that Amazon + // SES will include headers in delivery notifications, and a value of false + // specifies that Amazon SES will not include headers in delivery notifications. + HeadersInDeliveryNotificationsEnabled *bool `type:"boolean"` } // String returns the string representation @@ -2415,7 +3963,7 @@ type LambdaAction struct { // The default value is Event. For information about AWS Lambda invocation types, // see the AWS Lambda Developer Guide (http://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html). // - // There is a 30-second timeout on RequestResponse invocations. You should + // There is a 30-second timeout on RequestResponse invocations. You should // use Event invocation in most cases. Use RequestResponse only when you want // to make a mail flow decision, such as whether to stop the receipt rule or // the receipt rule set. @@ -2438,8 +3986,22 @@ func (s LambdaAction) GoString() string { return s.String() } -// Represents a request instructing the service to list all identities for the -// AWS Account. +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaAction"} + if s.FunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("FunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to return a list of all identities (email addresses +// and domains) that you have attempted to verify under your AWS account, regardless +// of verification status. type ListIdentitiesInput struct { _ struct{} `type:"structure"` @@ -2464,7 +4026,8 @@ func (s ListIdentitiesInput) GoString() string { return s.String() } -// Represents a list of all verified identities for the AWS Account. +// A list of all identities that you have attempted to verify under your AWS +// account, regardless of verification status. type ListIdentitiesOutput struct { _ struct{} `type:"structure"` @@ -2485,8 +4048,10 @@ func (s ListIdentitiesOutput) GoString() string { return s.String() } -// Represents a request instructing the service to list all authorization policies, -// by name, applying to an identity. +// Represents a request to return a list of sending authorization policies that +// are attached to an identity. Sending authorization is an Amazon SES feature +// that enables you to authorize other senders to use your identities. For information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type ListIdentityPoliciesInput struct { _ struct{} `type:"structure"` @@ -2508,8 +4073,20 @@ func (s ListIdentityPoliciesInput) GoString() string { return s.String() } -// Represents a list of policy names returned from a successful ListIdentityPolicies -// request. +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListIdentityPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIdentityPoliciesInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A list of names of sending authorization policies that apply to an identity. type ListIdentityPoliciesOutput struct { _ struct{} `type:"structure"` @@ -2527,6 +4104,9 @@ func (s ListIdentityPoliciesOutput) GoString() string { return s.String() } +// : Represents a request to list the IP address filters that exist under your +// AWS account. You use IP address filters when you receive email with Amazon +// SES. For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ListReceiptFiltersInput struct { _ struct{} `type:"structure"` } @@ -2541,6 +4121,7 @@ func (s ListReceiptFiltersInput) GoString() string { return s.String() } +// A list of IP address filters that exist under your AWS account. type ListReceiptFiltersOutput struct { _ struct{} `type:"structure"` @@ -2559,6 +4140,9 @@ func (s ListReceiptFiltersOutput) GoString() string { return s.String() } +// Represents a request to list the receipt rule sets that exist under your +// AWS account. You use receipt rule sets to receive email with Amazon SES. +// For more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ListReceiptRuleSetsInput struct { _ struct{} `type:"structure"` @@ -2577,6 +4161,7 @@ func (s ListReceiptRuleSetsInput) GoString() string { return s.String() } +// A list of receipt rule sets that exist under your AWS account. type ListReceiptRuleSetsOutput struct { _ struct{} `type:"structure"` @@ -2614,7 +4199,8 @@ func (s ListVerifiedEmailAddressesInput) GoString() string { return s.String() } -// Represents a list of all the email addresses verified for the current user. +// A list of email addresses that you have verified with Amazon SES under your +// AWS account. type ListVerifiedEmailAddressesOutput struct { _ struct{} `type:"structure"` @@ -2654,6 +4240,32 @@ func (s Message) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *Message) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Message"} + if s.Body == nil { + invalidParams.Add(request.NewErrParamRequired("Body")) + } + if s.Subject == nil { + invalidParams.Add(request.NewErrParamRequired("Subject")) + } + if s.Body != nil { + if err := s.Body.Validate(); err != nil { + invalidParams.AddNested("Body", err.(request.ErrInvalidParams)) + } + } + if s.Subject != nil { + if err := s.Subject.Validate(); err != nil { + invalidParams.AddNested("Subject", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Message-related information to include in the Delivery Status Notification // (DSN) when an email that Amazon SES receives on your behalf bounces. // @@ -2685,8 +4297,33 @@ func (s MessageDsn) GoString() string { return s.String() } -// Represents a request instructing the service to apply an authorization policy -// to an identity. +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *MessageDsn) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MessageDsn"} + if s.ReportingMta == nil { + invalidParams.Add(request.NewErrParamRequired("ReportingMta")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to add or update a sending authorization policy for +// an identity. Sending authorization is an Amazon SES feature that enables +// you to authorize other senders to use your identities. For information, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization.html). type PutIdentityPolicyInput struct { _ struct{} `type:"structure"` @@ -2720,8 +4357,32 @@ func (s PutIdentityPolicyInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutIdentityPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutIdentityPolicyInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type PutIdentityPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2750,9 +4411,12 @@ type RawMessage struct { // X-headers in the raw message to specify the "Source," "From," and "Return-Path" // addresses. For more information, see the documentation for SendRawEmail. // - // Do not include these X-headers in the DKIM signature, because they are removed - // by Amazon SES before sending the email. For more information, go to the Amazon - // SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + // Do not include these X-headers in the DKIM signature, because they are + // removed by Amazon SES before sending the email. + // + // For more information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). + // + // Data is automatically base64 encoded/decoded by the SDK. Data []byte `type:"blob" required:"true"` } @@ -2766,6 +4430,19 @@ func (s RawMessage) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RawMessage) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RawMessage"} + if s.Data == nil { + invalidParams.Add(request.NewErrParamRequired("Data")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // An action that Amazon SES can take when it receives an email on behalf of // one or more email addresses or domains that you own. 
An instance of this // data type can represent only one action. @@ -2813,6 +4490,51 @@ func (s ReceiptAction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptAction"} + if s.AddHeaderAction != nil { + if err := s.AddHeaderAction.Validate(); err != nil { + invalidParams.AddNested("AddHeaderAction", err.(request.ErrInvalidParams)) + } + } + if s.BounceAction != nil { + if err := s.BounceAction.Validate(); err != nil { + invalidParams.AddNested("BounceAction", err.(request.ErrInvalidParams)) + } + } + if s.LambdaAction != nil { + if err := s.LambdaAction.Validate(); err != nil { + invalidParams.AddNested("LambdaAction", err.(request.ErrInvalidParams)) + } + } + if s.S3Action != nil { + if err := s.S3Action.Validate(); err != nil { + invalidParams.AddNested("S3Action", err.(request.ErrInvalidParams)) + } + } + if s.SNSAction != nil { + if err := s.SNSAction.Validate(); err != nil { + invalidParams.AddNested("SNSAction", err.(request.ErrInvalidParams)) + } + } + if s.StopAction != nil { + if err := s.StopAction.Validate(); err != nil { + invalidParams.AddNested("StopAction", err.(request.ErrInvalidParams)) + } + } + if s.WorkmailAction != nil { + if err := s.WorkmailAction.Validate(); err != nil { + invalidParams.AddNested("WorkmailAction", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A receipt IP address filter enables you to specify whether to accept or reject // mail originating from an IP address or range of IP addresses. // @@ -2827,9 +4549,12 @@ type ReceiptFilter struct { // The name of the IP address filter. The name must: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-). Start and end with a letter or number. Contain less than - // 64 characters. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. Name *string `type:"string" required:"true"` } @@ -2843,6 +4568,27 @@ func (s ReceiptFilter) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptFilter"} + if s.IpFilter == nil { + invalidParams.Add(request.NewErrParamRequired("IpFilter")) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.IpFilter != nil { + if err := s.IpFilter.Validate(); err != nil { + invalidParams.AddNested("IpFilter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A receipt IP address filter enables you to specify whether to accept or reject // mail originating from an IP address or range of IP addresses. // @@ -2871,6 +4617,22 @@ func (s ReceiptIpFilter) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ReceiptIpFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptIpFilter"} + if s.Cidr == nil { + invalidParams.Add(request.NewErrParamRequired("Cidr")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Receipt rules enable you to specify which actions Amazon SES should take // when it receives mail on behalf of one or more email addresses or domains // that you own. @@ -2889,14 +4651,17 @@ type ReceiptRule struct { // of the recipient email addresses or domains specified in the receipt rule. Actions []*ReceiptAction `type:"list"` - // If true, the receipt rule is active. The default value is true. + // If true, the receipt rule is active. The default value is false. Enabled *bool `type:"boolean"` // The name of the receipt rule. The name must: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-). Start and end with a letter or number. Contain less than - // 64 characters. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. Name *string `type:"string" required:"true"` // The recipient domains and email addresses to which the receipt rule applies. @@ -2905,7 +4670,7 @@ type ReceiptRule struct { Recipients []*string `type:"list"` // If true, then messages to which this receipt rule applies are scanned for - // spam and viruses. The default value is true. + // spam and viruses. The default value is false. ScanEnabled *bool `type:"boolean"` // Specifies whether Amazon SES should require that incoming email is delivered @@ -2925,6 +4690,29 @@ func (s ReceiptRule) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReceiptRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReceiptRule"} + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Actions != nil { + for i, v := range s.Actions { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Actions", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // Information about a receipt rule set. // // A receipt rule set is a collection of rules that specify what Amazon SES @@ -2940,9 +4728,12 @@ type ReceiptRuleSetMetadata struct { // The name of the receipt rule set. The name must: // - // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores - // (_), or dashes (-). Start and end with a letter or number. Contain less than - // 64 characters. + // Contain only ASCII letters (a-z, A-Z), numbers (0-9), periods (.), underscores + // (_), or dashes (-). + // + // Start and end with a letter or number. + // + // Contain less than 64 characters. Name *string `type:"string"` } @@ -2983,7 +4774,7 @@ type RecipientDsnFields struct { // Either FinalRecipient or the recipient in BouncedRecipientInfo must be a // recipient of the original bounced message. // - // Do not prepend the FinalRecipient email address with rfc 822;, as described + // Do not prepend the FinalRecipient email address with rfc 822;, as described // in RFC 3798 (https://tools.ietf.org/html/rfc3798). 
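The naming rules repeated in these doc comments (rule sets, receipt filters, receipt rules: ASCII letters, digits, periods, underscores, or dashes; start and end with a letter or digit; fewer than 64 characters) can be pre-checked on the client. This is an illustrative helper only, not part of the SDK; Amazon SES remains the authority on what it accepts.

package main

import (
	"fmt"
	"regexp"
)

// ruleSetNamePattern mirrors the naming rules quoted in the doc comments:
// ASCII letters, digits, periods, underscores, or dashes, starting and
// ending with a letter or digit.
var ruleSetNamePattern = regexp.MustCompile(`^[A-Za-z0-9](?:[A-Za-z0-9._-]*[A-Za-z0-9])?$`)

// validRuleSetName is a client-side convenience check; the length limit
// ("less than 64 characters") is enforced separately from the pattern.
func validRuleSetName(name string) bool {
	return len(name) < 64 && ruleSetNamePattern.MatchString(name)
}

func main() {
	fmt.Println(validRuleSetName("my-rule-set.1"))      // true
	fmt.Println(validRuleSetName("-starts-with-dash")) // false
}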
FinalRecipient *string `type:"string"` @@ -3012,6 +4803,35 @@ func (s RecipientDsnFields) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *RecipientDsnFields) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RecipientDsnFields"} + if s.Action == nil { + invalidParams.Add(request.NewErrParamRequired("Action")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.ExtensionFields != nil { + for i, v := range s.ExtensionFields { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ExtensionFields", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to reorder the receipt rules within a receipt rule set. +// You use receipt rule sets to receive email with Amazon SES. For more information, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type ReorderReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -3033,6 +4853,23 @@ func (s ReorderReceiptRuleSetInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReorderReceiptRuleSetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReorderReceiptRuleSetInput"} + if s.RuleNames == nil { + invalidParams.Add(request.NewErrParamRequired("RuleNames")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type ReorderReceiptRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -3056,10 +4893,11 @@ func (s ReorderReceiptRuleSetOutput) GoString() string { // account, Amazon SES must have permission to access those resources. For information // about giving permissions, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). // -// When you save your emails to an Amazon S3 bucket, the maximum email size -// (including headers) is 30 MB. Emails larger than that will bounce. For information -// about specifying Amazon S3 actions in receipt rules, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). +// When you save your emails to an Amazon S3 bucket, the maximum email size +// (including headers) is 30 MB. Emails larger than that will bounce. +// +// For information about specifying Amazon S3 actions in receipt rules, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-s3.html). type S3Action struct { _ struct{} `type:"structure"` @@ -3070,27 +4908,29 @@ type S3Action struct { // before saving them to the Amazon S3 bucket. You can use the default master // key or a custom master key you created in AWS KMS as follows: // - // To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. + // To use the default master key, provide an ARN in the form of arn:aws:kms:REGION:ACCOUNT-ID-WITHOUT-HYPHENS:alias/aws/ses. 
// For example, if your AWS account ID is 123456789012 and you want to use the // default master key in the US West (Oregon) region, the ARN of the default // master key would be arn:aws:kms:us-west-2:123456789012:alias/aws/ses. If // you use the default master key, you don't need to perform any extra steps - // to give Amazon SES permission to use the key. To use a custom master key - // you created in AWS KMS, provide the ARN of the master key and ensure that - // you add a statement to your key's policy to give Amazon SES permission to - // use it. For more information about giving permissions, see the Amazon SES - // Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). - // For more information about key policies, see the AWS KMS Developer Guide + // to give Amazon SES permission to use the key. + // + // To use a custom master key you created in AWS KMS, provide the ARN of + // the master key and ensure that you add a statement to your key's policy to + // give Amazon SES permission to use it. For more information about giving permissions, + // see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). + // + // For more information about key policies, see the AWS KMS Developer Guide // (http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html). If // you do not specify a master key, Amazon SES will not encrypt your emails. // - // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client + // Your mail is encrypted by Amazon SES using the Amazon S3 encryption client // before the mail is submitted to Amazon S3 for storage. It is not encrypted // using Amazon S3 server-side encryption. This means that you must use the // Amazon S3 encryption client to decrypt the email after retrieving it from // Amazon S3, as the service has no access to use your AWS KMS keys for decryption. - // This encryption client is currently available with the AWS Java SDK (https://aws.amazon.com/sdk-for-java/) - // and AWS Ruby SDK (https://aws.amazon.com/sdk-for-ruby/) only. For more information + // This encryption client is currently available with the AWS Java SDK (http://aws.amazon.com/sdk-for-java/) + // and AWS Ruby SDK (http://aws.amazon.com/sdk-for-ruby/) only. For more information // about client-side encryption using AWS KMS master keys, see the Amazon S3 // Developer Guide (http://alpha-docs-aws.amazon.com/AmazonS3/latest/dev/UsingClientSideEncryption.html). KmsKeyArn *string `type:"string"` @@ -3117,6 +4957,19 @@ func (s S3Action) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *S3Action) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "S3Action"} + if s.BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("BucketName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // When included in a receipt rule, this action publishes a notification to // Amazon Simple Notification Service (Amazon SNS). This action includes a complete // copy of the email content in the Amazon SNS notifications. Amazon SNS notifications @@ -3129,14 +4982,21 @@ func (s S3Action) GoString() string { // to access it. For information about giving permissions, see the Amazon SES // Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-permissions.html). 
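To show how the receipt-rule pieces above fit together, here is a sketch (not part of this patch) that wires an S3 action and an SNS action into a receipt rule and validates the whole input before sending. The bucket, topic ARN, rule-set name, and addresses are placeholders, and the CreateReceiptRule client method is assumed from the CreateReceiptRuleInput/Output types earlier in this diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New())

	// Bucket, topic, and recipient values below are placeholders.
	rule := &ses.ReceiptRule{
		Name:        aws.String("store-and-notify"),
		Enabled:     aws.Bool(true),
		ScanEnabled: aws.Bool(true),
		Recipients:  []*string{aws.String("inbox@example.com")},
		Actions: []*ses.ReceiptAction{
			{S3Action: &ses.S3Action{BucketName: aws.String("my-inbound-mail")}},
			{SNSAction: &ses.SNSAction{TopicArn: aws.String("arn:aws:sns:us-west-2:123456789012:MyTopic")}},
		},
	}

	params := &ses.CreateReceiptRuleInput{
		RuleSetName: aws.String("default-rule-set"),
		Rule:        rule,
	}

	// Validate walks the nested actions, so a missing BucketName or TopicArn
	// is caught locally before the request is sent.
	if err := params.Validate(); err != nil {
		fmt.Println(err)
		return
	}
	if _, err := svc.CreateReceiptRule(params); err != nil {
		fmt.Println(err)
	}
}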
// -// You can only publish emails that are 150 KB or less (including the header) +// You can only publish emails that are 150 KB or less (including the header) // to Amazon SNS. Larger emails will bounce. If you anticipate emails larger -// than 150 KB, use the S3 action instead. For information about using a receipt -// rule to publish an Amazon SNS notification, see the Amazon SES Developer -// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). +// than 150 KB, use the S3 action instead. +// +// For information about using a receipt rule to publish an Amazon SNS notification, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-action-sns.html). type SNSAction struct { _ struct{} `type:"structure"` + // The encoding to use for the email within the Amazon SNS notification. UTF-8 + // is easier to use, but may not preserve all special characters when a message + // was encoded with a different encoding format. Base64 preserves all special + // characters. The default value is UTF-8. + Encoding *string `type:"string" enum:"SNSActionEncoding"` + // The Amazon Resource Name (ARN) of the Amazon SNS topic to notify. An example // of an Amazon SNS topic ARN is arn:aws:sns:us-west-2:123456789012:MyTopic. // For more information about Amazon SNS topics, see the Amazon SNS Developer @@ -3154,8 +5014,21 @@ func (s SNSAction) GoString() string { return s.String() } -// Request object for sending a simple/complex bounce. It contains all of the -// information needed to generate a basic DSN or a fully-customized DSN. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SNSAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SNSAction"} + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to send a bounce message to the sender of an email you +// received through Amazon SES. type SendBounceInput struct { _ struct{} `type:"structure"` @@ -3197,6 +5070,41 @@ func (s SendBounceInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendBounceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendBounceInput"} + if s.BounceSender == nil { + invalidParams.Add(request.NewErrParamRequired("BounceSender")) + } + if s.BouncedRecipientInfoList == nil { + invalidParams.Add(request.NewErrParamRequired("BouncedRecipientInfoList")) + } + if s.OriginalMessageId == nil { + invalidParams.Add(request.NewErrParamRequired("OriginalMessageId")) + } + if s.BouncedRecipientInfoList != nil { + for i, v := range s.BouncedRecipientInfoList { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "BouncedRecipientInfoList", i), err.(request.ErrInvalidParams)) + } + } + } + if s.MessageDsn != nil { + if err := s.MessageDsn.Validate(); err != nil { + invalidParams.AddNested("MessageDsn", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a unique message ID. type SendBounceOutput struct { _ struct{} `type:"structure"` @@ -3245,11 +5153,8 @@ func (s SendDataPoint) GoString() string { return s.String() } -// Represents a request instructing the service to send a single email message. 
-// -// This datatype can be used in application code to compose a message consisting -// of source, destination, message, reply-to, and return-path parts. This object -// can then be sent using the SendEmail action. +// Represents a request to send a single formatted email using Amazon SES. For +// more information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-formatted.html). type SendEmailInput struct { _ struct{} `type:"structure"` @@ -3326,7 +5231,31 @@ func (s SendEmailInput) GoString() string { return s.String() } -// Represents a unique message ID returned from a successful SendEmail request. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendEmailInput"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Message == nil { + invalidParams.Add(request.NewErrParamRequired("Message")) + } + if s.Source == nil { + invalidParams.Add(request.NewErrParamRequired("Source")) + } + if s.Message != nil { + if err := s.Message.Validate(); err != nil { + invalidParams.AddNested("Message", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a unique message ID. type SendEmailOutput struct { _ struct{} `type:"structure"` @@ -3344,11 +5273,8 @@ func (s SendEmailOutput) GoString() string { return s.String() } -// Represents a request instructing the service to send a raw email message. -// -// This datatype can be used in application code to compose a message consisting -// of source, destination, and raw message text. This object can then be sent -// using the SendRawEmail action. +// Represents a request to send a single raw email using Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/send-email-raw.html). type SendRawEmailInput struct { _ struct{} `type:"structure"` @@ -3364,18 +5290,22 @@ type SendRawEmailInput struct { // in the raw message of the email. If you use both the FromArn parameter and // the corresponding X-header, Amazon SES uses the value of the FromArn parameter. // - // For information about when to use this parameter, see the description of + // For information about when to use this parameter, see the description of // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). FromArn *string `type:"string"` // The raw text of the message. The client is responsible for ensuring the following: // - // Message must contain a header and a body, separated by a blank line. All - // required header fields must be present. Each part of a multipart MIME message - // must be formatted properly. MIME content types must be among those supported - // by Amazon SES. For more information, go to the Amazon SES Developer Guide - // (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). - // Content must be base64-encoded, if MIME requires it. + // Message must contain a header and a body, separated by a blank line. + // + // All required header fields must be present. + // + // Each part of a multipart MIME message must be formatted properly. + // + // MIME content types must be among those supported by Amazon SES. 
For more + // information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). + // + // Content must be base64-encoded, if MIME requires it. RawMessage *RawMessage `type:"structure" required:"true"` // This parameter is used only for sending authorization. It is the ARN of the @@ -3392,7 +5322,7 @@ type SendRawEmailInput struct { // and the corresponding X-header, Amazon SES uses the value of the ReturnPathArn // parameter. // - // For information about when to use this parameter, see the description of + // For information about when to use this parameter, see the description of // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). ReturnPathArn *string `type:"string"` @@ -3405,7 +5335,7 @@ type SendRawEmailInput struct { // of a literal string. MIME encoded-word syntax uses the following form: =?charset?encoding?encoded-text?=. // For more information, see RFC 2047 (http://tools.ietf.org/html/rfc2047). // - // If you specify the Source parameter and have feedback forwarding enabled, + // If you specify the Source parameter and have feedback forwarding enabled, // then bounces and complaints will be sent to this email address. This takes // precedence over any Return-Path header that you might include in the raw // text of the message. @@ -3425,7 +5355,7 @@ type SendRawEmailInput struct { // and the corresponding X-header, Amazon SES uses the value of the SourceArn // parameter. // - // For information about when to use this parameter, see the description of + // For information about when to use this parameter, see the description of // SendRawEmail in this guide, or see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/sending-authorization-delegate-sender-tasks-email.html). SourceArn *string `type:"string"` } @@ -3440,7 +5370,25 @@ func (s SendRawEmailInput) GoString() string { return s.String() } -// Represents a unique message ID returned from a successful SendRawEmail request. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SendRawEmailInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SendRawEmailInput"} + if s.RawMessage == nil { + invalidParams.Add(request.NewErrParamRequired("RawMessage")) + } + if s.RawMessage != nil { + if err := s.RawMessage.Validate(); err != nil { + invalidParams.AddNested("RawMessage", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a unique message ID. type SendRawEmailOutput struct { _ struct{} `type:"structure"` @@ -3458,6 +5406,9 @@ func (s SendRawEmailOutput) GoString() string { return s.String() } +// Represents a request to set a receipt rule set as the active receipt rule +// set. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type SetActiveReceiptRuleSetInput struct { _ struct{} `type:"structure"` @@ -3476,6 +5427,7 @@ func (s SetActiveReceiptRuleSetInput) GoString() string { return s.String() } +// An empty element returned on a successful request. 
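Editor's note (illustrative, not part of the patch): the rewritten SendRawEmailInput documentation above lists what the caller must guarantee in the raw message — headers and body separated by a blank line, all required header fields present, proper MIME formatting, and base64 encoding where MIME requires it. A minimal sketch of satisfying those requirements with this SDK might look like the following; the region, addresses, and subject are placeholders, and the Validate call uses the method this patch introduces.

package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	// Hypothetical region and addresses, for illustration only.
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	// A minimal RFC 5322 message: header fields, a blank line, then the body.
	raw := strings.Join([]string{
		"From: sender@example.com",
		"To: recipient@example.com",
		"Subject: Test message",
		"MIME-Version: 1.0",
		"Content-Type: text/plain; charset=UTF-8",
		"",
		"Hello from SendRawEmail.",
	}, "\r\n")

	input := &ses.SendRawEmailInput{
		RawMessage: &ses.RawMessage{Data: []byte(raw)},
	}

	// Validate is one of the methods added by this patch; it reports missing
	// required parameters (here, RawMessage) before the request is sent.
	if err := input.Validate(); err != nil {
		log.Fatal(err)
	}

	resp, err := svc.SendRawEmail(input)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(resp.MessageId))
}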
type SetActiveReceiptRuleSetOutput struct { _ struct{} `type:"structure"` } @@ -3490,8 +5442,9 @@ func (s SetActiveReceiptRuleSetOutput) GoString() string { return s.String() } -// Represents a request instructing the service to enable or disable DKIM signing -// for an identity. +// Represents a request to enable or disable Amazon SES Easy DKIM signing for +// an identity. For more information about setting up Easy DKIM, see the Amazon +// SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). type SetIdentityDkimEnabledInput struct { _ struct{} `type:"structure"` @@ -3513,8 +5466,23 @@ func (s SetIdentityDkimEnabledInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityDkimEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityDkimEnabledInput"} + if s.DkimEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("DkimEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type SetIdentityDkimEnabledOutput struct { _ struct{} `type:"structure"` } @@ -3529,6 +5497,9 @@ func (s SetIdentityDkimEnabledOutput) GoString() string { return s.String() } +// Represents a request to enable or disable whether Amazon SES forwards you +// bounce and complaint notifications through email. For information about email +// feedback forwarding, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-email.html). type SetIdentityFeedbackForwardingEnabledInput struct { _ struct{} `type:"structure"` @@ -3555,8 +5526,23 @@ func (s SetIdentityFeedbackForwardingEnabledInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityFeedbackForwardingEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityFeedbackForwardingEnabledInput"} + if s.ForwardingEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("ForwardingEnabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type SetIdentityFeedbackForwardingEnabledOutput struct { _ struct{} `type:"structure"` } @@ -3571,7 +5557,145 @@ func (s SetIdentityFeedbackForwardingEnabledOutput) GoString() string { return s.String() } -// Represents a request to set or clear an identity's notification topic. +// Represents a request to set whether Amazon SES includes the original email +// headers in the Amazon SNS notifications of a specified type. For information +// about notifications, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). +type SetIdentityHeadersInNotificationsEnabledInput struct { + _ struct{} `type:"structure"` + + // Sets whether Amazon SES includes the original email headers in Amazon SNS + // notifications of the specified notification type. 
A value of true specifies + // that Amazon SES will include headers in notifications, and a value of false + // specifies that Amazon SES will not include headers in notifications. + // + // This value can only be set when NotificationType is already set to use a + // particular Amazon SNS topic. + Enabled *bool `type:"boolean" required:"true"` + + // The identity for which to enable or disable headers in notifications. Examples: + // user@example.com, example.com. + Identity *string `type:"string" required:"true"` + + // The notification type for which to enable or disable headers in notifications. + NotificationType *string `type:"string" required:"true" enum:"NotificationType"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityHeadersInNotificationsEnabledInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityHeadersInNotificationsEnabledInput"} + if s.Enabled == nil { + invalidParams.Add(request.NewErrParamRequired("Enabled")) + } + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityHeadersInNotificationsEnabledOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityHeadersInNotificationsEnabledOutput) GoString() string { + return s.String() +} + +// Represents a request to enable or disable the Amazon SES custom MAIL FROM +// domain setup for a verified identity. For information about using a custom +// MAIL FROM domain, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html). +type SetIdentityMailFromDomainInput struct { + _ struct{} `type:"structure"` + + // The action that you want Amazon SES to take if it cannot successfully read + // the required MX record when you send an email. If you choose UseDefaultValue, + // Amazon SES will use amazonses.com (or a subdomain of that) as the MAIL FROM + // domain. If you choose RejectMessage, Amazon SES will return a MailFromDomainNotVerified + // error and not send the email. + // + // The action specified in BehaviorOnMXFailure is taken when the custom MAIL + // FROM domain setup is in the Pending, Failed, and TemporaryFailure states. + BehaviorOnMXFailure *string `type:"string" enum:"BehaviorOnMXFailure"` + + // The verified identity for which you want to enable or disable the specified + // custom MAIL FROM domain. + Identity *string `type:"string" required:"true"` + + // The custom MAIL FROM domain that you want the verified identity to use. 
The + // MAIL FROM domain must 1) be a subdomain of the verified identity, 2) not + // be used in a "From" address if the MAIL FROM domain is the destination of + // email feedback forwarding (for more information, see the Amazon SES Developer + // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mail-from.html)), + // and 3) not be used to receive emails. A value of null disables the custom + // MAIL FROM setting for the identity. + MailFromDomain *string `type:"string"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityMailFromDomainInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityMailFromDomainInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. +type SetIdentityMailFromDomainOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetIdentityMailFromDomainOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetIdentityMailFromDomainOutput) GoString() string { + return s.String() +} + +// Represents a request to specify the Amazon SNS topic to which Amazon SES +// will publish bounce, complaint, or delivery notifications for emails sent +// with that identity as the Source. For information about Amazon SES notifications, +// see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/notifications-via-sns.html). type SetIdentityNotificationTopicInput struct { _ struct{} `type:"structure"` @@ -3600,8 +5724,23 @@ func (s SetIdentityNotificationTopicInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetIdentityNotificationTopicInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetIdentityNotificationTopicInput"} + if s.Identity == nil { + invalidParams.Add(request.NewErrParamRequired("Identity")) + } + if s.NotificationType == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationType")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type SetIdentityNotificationTopicOutput struct { _ struct{} `type:"structure"` } @@ -3616,6 +5755,9 @@ func (s SetIdentityNotificationTopicOutput) GoString() string { return s.String() } +// Represents a request to set the position of a receipt rule in a receipt rule +// set. You use receipt rule sets to receive email with Amazon SES. For more +// information, see the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type SetReceiptRulePositionInput struct { _ struct{} `type:"structure"` @@ -3639,6 +5781,23 @@ func (s SetReceiptRulePositionInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *SetReceiptRulePositionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetReceiptRulePositionInput"} + if s.RuleName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleName")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type SetReceiptRulePositionOutput struct { _ struct{} `type:"structure"` } @@ -3682,6 +5841,22 @@ func (s StopAction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *StopAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StopAction"} + if s.Scope == nil { + invalidParams.Add(request.NewErrParamRequired("Scope")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Represents a request to update a receipt rule. You use receipt rules to receive +// email with Amazon SES. For more information, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-concepts.html). type UpdateReceiptRuleInput struct { _ struct{} `type:"structure"` @@ -3702,6 +5877,28 @@ func (s UpdateReceiptRuleInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateReceiptRuleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UpdateReceiptRuleInput"} + if s.Rule == nil { + invalidParams.Add(request.NewErrParamRequired("Rule")) + } + if s.RuleSetName == nil { + invalidParams.Add(request.NewErrParamRequired("RuleSetName")) + } + if s.Rule != nil { + if err := s.Rule.Validate(); err != nil { + invalidParams.AddNested("Rule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type UpdateReceiptRuleOutput struct { _ struct{} `type:"structure"` } @@ -3716,8 +5913,9 @@ func (s UpdateReceiptRuleOutput) GoString() string { return s.String() } -// Represents a request instructing the service to begin DKIM verification for -// a domain. +// Represents a request to generate the CNAME records needed to set up Easy +// DKIM with Amazon SES. For more information about setting up Easy DKIM, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/easy-dkim.html). type VerifyDomainDkimInput struct { _ struct{} `type:"structure"` @@ -3735,8 +5933,21 @@ func (s VerifyDomainDkimInput) GoString() string { return s.String() } -// Represents the DNS records that must be published in the domain name's DNS -// to complete DKIM setup. +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyDomainDkimInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainDkimInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns CNAME records that you must publish to the DNS server of your domain +// to set up Easy DKIM with Amazon SES. 
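Editor's note (illustrative, not part of the patch): VerifyDomainDkim, documented just above, returns the tokens from which the Easy DKIM CNAME records are built. A hedged sketch of retrieving and printing those records follows; the domain and region are placeholders, and the record format follows the Easy DKIM convention described in the Amazon SES documentation.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	input := &ses.VerifyDomainDkimInput{Domain: aws.String("example.com")}
	if err := input.Validate(); err != nil { // Validate() is added by this patch
		log.Fatal(err)
	}

	resp, err := svc.VerifyDomainDkim(input)
	if err != nil {
		log.Fatal(err)
	}

	// Each token corresponds to one CNAME record that must be published in the
	// domain's DNS before DKIM signing becomes active.
	for _, token := range resp.DkimTokens {
		fmt.Printf("%s._domainkey.example.com CNAME %s.dkim.amazonses.com\n",
			aws.StringValue(token), aws.StringValue(token))
	}
}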
type VerifyDomainDkimOutput struct { _ struct{} `type:"structure"` @@ -3764,7 +5975,10 @@ func (s VerifyDomainDkimOutput) GoString() string { return s.String() } -// Represents a request instructing the service to begin domain verification. +// Represents a request to begin Amazon SES domain verification and to generate +// the TXT records that you must publish to the DNS server of your domain to +// complete the verification. For information about domain verification, see +// the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html). type VerifyDomainIdentityInput struct { _ struct{} `type:"structure"` @@ -3782,7 +5996,21 @@ func (s VerifyDomainIdentityInput) GoString() string { return s.String() } -// Represents a token used for domain ownership verification. +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyDomainIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyDomainIdentityInput"} + if s.Domain == nil { + invalidParams.Add(request.NewErrParamRequired("Domain")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Returns a TXT record that you must publish to the DNS server of your domain +// to complete domain verification with Amazon SES. type VerifyDomainIdentityOutput struct { _ struct{} `type:"structure"` @@ -3801,7 +6029,9 @@ func (s VerifyDomainIdentityOutput) GoString() string { return s.String() } -// Represents a request instructing the service to begin email address verification. +// Represents a request to begin email address verification with Amazon SES. +// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). type VerifyEmailAddressInput struct { _ struct{} `type:"structure"` @@ -3819,6 +6049,19 @@ func (s VerifyEmailAddressInput) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *VerifyEmailAddressInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailAddressInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + type VerifyEmailAddressOutput struct { _ struct{} `type:"structure"` } @@ -3833,7 +6076,9 @@ func (s VerifyEmailAddressOutput) GoString() string { return s.String() } -// Represents a request instructing the service to begin email address verification. +// Represents a request to begin email address verification with Amazon SES. +// For information about email address verification, see the Amazon SES Developer +// Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-email-addresses.html). type VerifyEmailIdentityInput struct { _ struct{} `type:"structure"` @@ -3851,8 +6096,20 @@ func (s VerifyEmailIdentityInput) GoString() string { return s.String() } -// An empty element. Receiving this element indicates that the request completed -// successfully. +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *VerifyEmailIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "VerifyEmailIdentityInput"} + if s.EmailAddress == nil { + invalidParams.Add(request.NewErrParamRequired("EmailAddress")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An empty element returned on a successful request. type VerifyEmailIdentityOutput struct { _ struct{} `type:"structure"` } @@ -3900,6 +6157,26 @@ func (s WorkmailAction) GoString() string { return s.String() } +// Validate inspects the fields of the type to determine if they are valid. +func (s *WorkmailAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WorkmailAction"} + if s.OrganizationArn == nil { + invalidParams.Add(request.NewErrParamRequired("OrganizationArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum BehaviorOnMXFailure + BehaviorOnMXFailureUseDefaultValue = "UseDefaultValue" + // @enum BehaviorOnMXFailure + BehaviorOnMXFailureRejectMessage = "RejectMessage" +) + const ( // @enum BounceType BounceTypeDoesNotExist = "DoesNotExist" @@ -3915,6 +6192,17 @@ const ( BounceTypeTemporaryFailure = "TemporaryFailure" ) +const ( + // @enum CustomMailFromStatus + CustomMailFromStatusPending = "Pending" + // @enum CustomMailFromStatus + CustomMailFromStatusSuccess = "Success" + // @enum CustomMailFromStatus + CustomMailFromStatusFailed = "Failed" + // @enum CustomMailFromStatus + CustomMailFromStatusTemporaryFailure = "TemporaryFailure" +) + const ( // @enum DsnAction DsnActionFailed = "failed" @@ -3958,6 +6246,13 @@ const ( ReceiptFilterPolicyAllow = "Allow" ) +const ( + // @enum SNSActionEncoding + SNSActionEncodingUtf8 = "UTF-8" + // @enum SNSActionEncoding + SNSActionEncodingBase64 = "Base64" +) + const ( // @enum StopScope StopScopeRuleSet = "RuleSet" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go index 8f721db6d..2952ac131 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/service.go @@ -7,15 +7,15 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // This is the API Reference for Amazon Simple Email Service (Amazon SES). This // documentation is intended to be used in conjunction with the Amazon SES Developer // Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/Welcome.html). // -// For a list of Amazon SES endpoints to use in service requests, see Regions +// For a list of Amazon SES endpoints to use in service requests, see Regions // and Amazon SES (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/regions.html) // in the Amazon SES Developer Guide. //The service client's operations are safe to be used concurrently. 
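Editor's note (illustrative, not part of the patch): the package documentation above introduces the SES client, and the bulk of this patch adds Validate methods that surface missing required parameters locally via request.ErrInvalidParams — for SendEmailInput these are Destination, Message, and Source, as its new Validate method shows. A minimal sketch of that flow, assuming placeholder addresses and region, might look like this:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ses"
)

func main() {
	svc := ses.New(session.New(&aws.Config{Region: aws.String("us-west-2")}))

	params := &ses.SendEmailInput{
		Source: aws.String("sender@example.com"),
		Destination: &ses.Destination{
			ToAddresses: []*string{aws.String("recipient@example.com")},
		},
		Message: &ses.Message{
			Subject: &ses.Content{Data: aws.String("Test message")},
			Body: &ses.Body{
				Text: &ses.Content{Data: aws.String("Hello from SendEmail.")},
			},
		},
	}

	// Validate catches missing required parameters before any request is made,
	// using the ErrInvalidParams machinery added throughout this patch.
	if err := params.Validate(); err != nil {
		log.Fatal(err)
	}

	resp, err := svc.SendEmail(params)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(aws.StringValue(resp.MessageId))
}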
@@ -65,7 +65,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go index 51f2f35cb..5f0be867c 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/api.go @@ -14,7 +14,28 @@ import ( const opAddPermission = "AddPermission" -// AddPermissionRequest generates a request for the AddPermission operation. +// AddPermissionRequest generates a "aws/request.Request" representing the +// client's request for the AddPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddPermissionRequest method. +// req, resp := client.AddPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { op := &request.Operation{ Name: opAddPermission, @@ -42,9 +63,83 @@ func (c *SNS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, er return out, err } +const opCheckIfPhoneNumberIsOptedOut = "CheckIfPhoneNumberIsOptedOut" + +// CheckIfPhoneNumberIsOptedOutRequest generates a "aws/request.Request" representing the +// client's request for the CheckIfPhoneNumberIsOptedOut operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CheckIfPhoneNumberIsOptedOut method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CheckIfPhoneNumberIsOptedOutRequest method. 
+// req, resp := client.CheckIfPhoneNumberIsOptedOutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) CheckIfPhoneNumberIsOptedOutRequest(input *CheckIfPhoneNumberIsOptedOutInput) (req *request.Request, output *CheckIfPhoneNumberIsOptedOutOutput) { + op := &request.Operation{ + Name: opCheckIfPhoneNumberIsOptedOut, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CheckIfPhoneNumberIsOptedOutInput{} + } + + req = c.newRequest(op, input, output) + output = &CheckIfPhoneNumberIsOptedOutOutput{} + req.Data = output + return +} + +// Accepts a phone number and indicates whether the phone holder has opted out +// of receiving SMS messages from your account. You cannot send SMS messages +// to a number that is opted out. +// +// To resume sending messages, you can opt in the number by using the OptInPhoneNumber +// action. +func (c *SNS) CheckIfPhoneNumberIsOptedOut(input *CheckIfPhoneNumberIsOptedOutInput) (*CheckIfPhoneNumberIsOptedOutOutput, error) { + req, out := c.CheckIfPhoneNumberIsOptedOutRequest(input) + err := req.Send() + return out, err +} + const opConfirmSubscription = "ConfirmSubscription" -// ConfirmSubscriptionRequest generates a request for the ConfirmSubscription operation. +// ConfirmSubscriptionRequest generates a "aws/request.Request" representing the +// client's request for the ConfirmSubscription operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ConfirmSubscription method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ConfirmSubscriptionRequest method. +// req, resp := client.ConfirmSubscriptionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ConfirmSubscriptionRequest(input *ConfirmSubscriptionInput) (req *request.Request, output *ConfirmSubscriptionOutput) { op := &request.Operation{ Name: opConfirmSubscription, @@ -75,7 +170,28 @@ func (c *SNS) ConfirmSubscription(input *ConfirmSubscriptionInput) (*ConfirmSubs const opCreatePlatformApplication = "CreatePlatformApplication" -// CreatePlatformApplicationRequest generates a request for the CreatePlatformApplication operation. +// CreatePlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the CreatePlatformApplicationRequest method. +// req, resp := client.CreatePlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationInput) (req *request.Request, output *CreatePlatformApplicationOutput) { op := &request.Operation{ Name: opCreatePlatformApplication, @@ -100,12 +216,25 @@ func (c *SNS) CreatePlatformApplicationRequest(input *CreatePlatformApplicationI // from the notification service. For APNS/APNS_SANDBOX, PlatformPrincipal is // "SSL certificate". For GCM, PlatformPrincipal is not applicable. For ADM, // PlatformPrincipal is "client id". The PlatformCredential is also received -// from the notification service. For APNS/APNS_SANDBOX, PlatformCredential -// is "private key". For GCM, PlatformCredential is "API key". For ADM, PlatformCredential -// is "client secret". The PlatformApplicationArn that is returned when using -// CreatePlatformApplication is then used as an attribute for the CreatePlatformEndpoint -// action. For more information, see Using Amazon SNS Mobile Push Notifications -// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// from the notification service. For WNS, PlatformPrincipal is "Package Security +// Identifier". For MPNS, PlatformPrincipal is "TLS certificate". For Baidu, +// PlatformPrincipal is "API key". +// +// For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential +// is "API key". For ADM, PlatformCredential is "client secret". For WNS, PlatformCredential +// is "secret key". For MPNS, PlatformCredential is "private key". For Baidu, +// PlatformCredential is "secret key". The PlatformApplicationArn that is returned +// when using CreatePlatformApplication is then used as an attribute for the +// CreatePlatformEndpoint action. For more information, see Using Amazon SNS +// Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// For more information about obtaining the PlatformPrincipal and PlatformCredential +// for each of the supported push notification services, see Getting Started +// with Apple Push Notification Service (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-apns.html), +// Getting Started with Amazon Device Messaging (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-adm.html), +// Getting Started with Baidu Cloud Push (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-baidu.html), +// Getting Started with Google Cloud Messaging for Android (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-gcm.html), +// Getting Started with MPNS (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-mpns.html), +// or Getting Started with WNS (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-wns.html). func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) (*CreatePlatformApplicationOutput, error) { req, out := c.CreatePlatformApplicationRequest(input) err := req.Send() @@ -114,7 +243,28 @@ func (c *SNS) CreatePlatformApplication(input *CreatePlatformApplicationInput) ( const opCreatePlatformEndpoint = "CreatePlatformEndpoint" -// CreatePlatformEndpointRequest generates a request for the CreatePlatformEndpoint operation. +// CreatePlatformEndpointRequest generates a "aws/request.Request" representing the +// client's request for the CreatePlatformEndpoint operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreatePlatformEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreatePlatformEndpointRequest method. +// req, resp := client.CreatePlatformEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) CreatePlatformEndpointRequest(input *CreatePlatformEndpointInput) (req *request.Request, output *CreatePlatformEndpointOutput) { op := &request.Operation{ Name: opCreatePlatformEndpoint, @@ -154,7 +304,28 @@ func (c *SNS) CreatePlatformEndpoint(input *CreatePlatformEndpointInput) (*Creat const opCreateTopic = "CreateTopic" -// CreateTopicRequest generates a request for the CreateTopic operation. +// CreateTopicRequest generates a "aws/request.Request" representing the +// client's request for the CreateTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateTopicRequest method. +// req, resp := client.CreateTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, output *CreateTopicOutput) { op := &request.Operation{ Name: opCreateTopic, @@ -173,7 +344,7 @@ func (c *SNS) CreateTopicRequest(input *CreateTopicInput) (req *request.Request, } // Creates a topic to which notifications can be published. Users can create -// at most 3000 topics. For more information, see http://aws.amazon.com/sns +// at most 100,000 topics. For more information, see http://aws.amazon.com/sns // (http://aws.amazon.com/sns/). This action is idempotent, so if the requester // already owns a topic with the specified name, that topic's ARN is returned // without creating a new topic. @@ -185,7 +356,28 @@ func (c *SNS) CreateTopic(input *CreateTopicInput) (*CreateTopicOutput, error) { const opDeleteEndpoint = "DeleteEndpoint" -// DeleteEndpointRequest generates a request for the DeleteEndpoint operation. +// DeleteEndpointRequest generates a "aws/request.Request" representing the +// client's request for the DeleteEndpoint operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteEndpoint method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteEndpointRequest method. +// req, resp := client.DeleteEndpointRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Request, output *DeleteEndpointOutput) { op := &request.Operation{ Name: opDeleteEndpoint, @@ -205,8 +397,12 @@ func (c *SNS) DeleteEndpointRequest(input *DeleteEndpointInput) (req *request.Re return } -// Deletes the endpoint from Amazon SNS. This action is idempotent. For more -// information, see Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// Deletes the endpoint for a device and mobile app from Amazon SNS. This action +// is idempotent. For more information, see Using Amazon SNS Mobile Push Notifications +// (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// +// When you delete an endpoint that is also subscribed to a topic, then you +// must also unsubscribe the endpoint from the topic. func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, error) { req, out := c.DeleteEndpointRequest(input) err := req.Send() @@ -215,7 +411,28 @@ func (c *SNS) DeleteEndpoint(input *DeleteEndpointInput) (*DeleteEndpointOutput, const opDeletePlatformApplication = "DeletePlatformApplication" -// DeletePlatformApplicationRequest generates a request for the DeletePlatformApplication operation. +// DeletePlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeletePlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeletePlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeletePlatformApplicationRequest method. +// req, resp := client.DeletePlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) DeletePlatformApplicationRequest(input *DeletePlatformApplicationInput) (req *request.Request, output *DeletePlatformApplicationOutput) { op := &request.Operation{ Name: opDeletePlatformApplication, @@ -246,7 +463,28 @@ func (c *SNS) DeletePlatformApplication(input *DeletePlatformApplicationInput) ( const opDeleteTopic = "DeleteTopic" -// DeleteTopicRequest generates a request for the DeleteTopic operation. +// DeleteTopicRequest generates a "aws/request.Request" representing the +// client's request for the DeleteTopic operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteTopicRequest method. +// req, resp := client.DeleteTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) DeleteTopicRequest(input *DeleteTopicInput) (req *request.Request, output *DeleteTopicOutput) { op := &request.Operation{ Name: opDeleteTopic, @@ -278,7 +516,28 @@ func (c *SNS) DeleteTopic(input *DeleteTopicInput) (*DeleteTopicOutput, error) { const opGetEndpointAttributes = "GetEndpointAttributes" -// GetEndpointAttributesRequest generates a request for the GetEndpointAttributes operation. +// GetEndpointAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetEndpointAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetEndpointAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetEndpointAttributesRequest method. +// req, resp := client.GetEndpointAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) GetEndpointAttributesRequest(input *GetEndpointAttributesInput) (req *request.Request, output *GetEndpointAttributesOutput) { op := &request.Operation{ Name: opGetEndpointAttributes, @@ -307,7 +566,28 @@ func (c *SNS) GetEndpointAttributes(input *GetEndpointAttributesInput) (*GetEndp const opGetPlatformApplicationAttributes = "GetPlatformApplicationAttributes" -// GetPlatformApplicationAttributesRequest generates a request for the GetPlatformApplicationAttributes operation. +// GetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetPlatformApplicationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetPlatformApplicationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the GetPlatformApplicationAttributesRequest method. +// req, resp := client.GetPlatformApplicationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) GetPlatformApplicationAttributesRequest(input *GetPlatformApplicationAttributesInput) (req *request.Request, output *GetPlatformApplicationAttributesOutput) { op := &request.Operation{ Name: opGetPlatformApplicationAttributes, @@ -334,9 +614,80 @@ func (c *SNS) GetPlatformApplicationAttributes(input *GetPlatformApplicationAttr return out, err } +const opGetSMSAttributes = "GetSMSAttributes" + +// GetSMSAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetSMSAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSMSAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSMSAttributesRequest method. +// req, resp := client.GetSMSAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) GetSMSAttributesRequest(input *GetSMSAttributesInput) (req *request.Request, output *GetSMSAttributesOutput) { + op := &request.Operation{ + Name: opGetSMSAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSMSAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &GetSMSAttributesOutput{} + req.Data = output + return +} + +// Returns the settings for sending SMS messages from your account. +// +// These settings are set with the SetSMSAttributes action. +func (c *SNS) GetSMSAttributes(input *GetSMSAttributesInput) (*GetSMSAttributesOutput, error) { + req, out := c.GetSMSAttributesRequest(input) + err := req.Send() + return out, err +} + const opGetSubscriptionAttributes = "GetSubscriptionAttributes" -// GetSubscriptionAttributesRequest generates a request for the GetSubscriptionAttributes operation. +// GetSubscriptionAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetSubscriptionAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSubscriptionAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSubscriptionAttributesRequest method. 
+// req, resp := client.GetSubscriptionAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) GetSubscriptionAttributesRequest(input *GetSubscriptionAttributesInput) (req *request.Request, output *GetSubscriptionAttributesOutput) { op := &request.Operation{ Name: opGetSubscriptionAttributes, @@ -363,7 +714,28 @@ func (c *SNS) GetSubscriptionAttributes(input *GetSubscriptionAttributesInput) ( const opGetTopicAttributes = "GetTopicAttributes" -// GetTopicAttributesRequest generates a request for the GetTopicAttributes operation. +// GetTopicAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetTopicAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetTopicAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetTopicAttributesRequest method. +// req, resp := client.GetTopicAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) GetTopicAttributesRequest(input *GetTopicAttributesInput) (req *request.Request, output *GetTopicAttributesOutput) { op := &request.Operation{ Name: opGetTopicAttributes, @@ -391,7 +763,28 @@ func (c *SNS) GetTopicAttributes(input *GetTopicAttributesInput) (*GetTopicAttri const opListEndpointsByPlatformApplication = "ListEndpointsByPlatformApplication" -// ListEndpointsByPlatformApplicationRequest generates a request for the ListEndpointsByPlatformApplication operation. +// ListEndpointsByPlatformApplicationRequest generates a "aws/request.Request" representing the +// client's request for the ListEndpointsByPlatformApplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListEndpointsByPlatformApplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListEndpointsByPlatformApplicationRequest method. 
+// req, resp := client.ListEndpointsByPlatformApplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ListEndpointsByPlatformApplicationRequest(input *ListEndpointsByPlatformApplicationInput) (req *request.Request, output *ListEndpointsByPlatformApplicationOutput) { op := &request.Operation{ Name: opListEndpointsByPlatformApplication, @@ -429,6 +822,23 @@ func (c *SNS) ListEndpointsByPlatformApplication(input *ListEndpointsByPlatformA return out, err } +// ListEndpointsByPlatformApplicationPages iterates over the pages of a ListEndpointsByPlatformApplication operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListEndpointsByPlatformApplication method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListEndpointsByPlatformApplication operation. +// pageNum := 0 +// err := client.ListEndpointsByPlatformApplicationPages(params, +// func(page *ListEndpointsByPlatformApplicationOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlatformApplicationInput, fn func(p *ListEndpointsByPlatformApplicationOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListEndpointsByPlatformApplicationRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -437,9 +847,86 @@ func (c *SNS) ListEndpointsByPlatformApplicationPages(input *ListEndpointsByPlat }) } +const opListPhoneNumbersOptedOut = "ListPhoneNumbersOptedOut" + +// ListPhoneNumbersOptedOutRequest generates a "aws/request.Request" representing the +// client's request for the ListPhoneNumbersOptedOut operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPhoneNumbersOptedOut method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPhoneNumbersOptedOutRequest method. +// req, resp := client.ListPhoneNumbersOptedOutRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) ListPhoneNumbersOptedOutRequest(input *ListPhoneNumbersOptedOutInput) (req *request.Request, output *ListPhoneNumbersOptedOutOutput) { + op := &request.Operation{ + Name: opListPhoneNumbersOptedOut, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListPhoneNumbersOptedOutInput{} + } + + req = c.newRequest(op, input, output) + output = &ListPhoneNumbersOptedOutOutput{} + req.Data = output + return +} + +// Returns a list of phone numbers that are opted out, meaning you cannot send +// SMS messages to them. +// +// The results for ListPhoneNumbersOptedOut are paginated, and each page returns +// up to 100 phone numbers. 
If additional phone numbers are available after +// the first page of results, then a NextToken string will be returned. To receive +// the next page, you call ListPhoneNumbersOptedOut again using the NextToken +// string received from the previous call. When there are no more records to +// return, NextToken will be null. +func (c *SNS) ListPhoneNumbersOptedOut(input *ListPhoneNumbersOptedOutInput) (*ListPhoneNumbersOptedOutOutput, error) { + req, out := c.ListPhoneNumbersOptedOutRequest(input) + err := req.Send() + return out, err +} + const opListPlatformApplications = "ListPlatformApplications" -// ListPlatformApplicationsRequest generates a request for the ListPlatformApplications operation. +// ListPlatformApplicationsRequest generates a "aws/request.Request" representing the +// client's request for the ListPlatformApplications operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListPlatformApplications method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPlatformApplicationsRequest method. +// req, resp := client.ListPlatformApplicationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ListPlatformApplicationsRequest(input *ListPlatformApplicationsInput) (req *request.Request, output *ListPlatformApplicationsOutput) { op := &request.Operation{ Name: opListPlatformApplications, @@ -477,6 +964,23 @@ func (c *SNS) ListPlatformApplications(input *ListPlatformApplicationsInput) (*L return out, err } +// ListPlatformApplicationsPages iterates over the pages of a ListPlatformApplications operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListPlatformApplications method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListPlatformApplications operation. +// pageNum := 0 +// err := client.ListPlatformApplicationsPages(params, +// func(page *ListPlatformApplicationsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput, fn func(p *ListPlatformApplicationsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListPlatformApplicationsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -487,7 +991,28 @@ func (c *SNS) ListPlatformApplicationsPages(input *ListPlatformApplicationsInput const opListSubscriptions = "ListSubscriptions" -// ListSubscriptionsRequest generates a request for the ListSubscriptions operation. +// ListSubscriptionsRequest generates a "aws/request.Request" representing the +// client's request for the ListSubscriptions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
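A minimal sketch of driving the NextToken paging that the ListPhoneNumbersOptedOut documentation above describes, from calling code; the session setup and region below are placeholder assumptions, not part of the vendored SDK file:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/sns"
    )

    func main() {
    	// Assumed client setup; credentials come from the environment, region is a placeholder.
    	svc := sns.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

    	input := &sns.ListPhoneNumbersOptedOutInput{}
    	for {
    		out, err := svc.ListPhoneNumbersOptedOut(input)
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, number := range out.PhoneNumbers {
    			fmt.Println(aws.StringValue(number))
    		}
    		// NextToken is nil once there are no more opted-out numbers to return.
    		if out.NextToken == nil {
    			break
    		}
    		input.NextToken = out.NextToken
    	}
    }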
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSubscriptions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSubscriptionsRequest method. +// req, resp := client.ListSubscriptionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ListSubscriptionsRequest(input *ListSubscriptionsInput) (req *request.Request, output *ListSubscriptionsOutput) { op := &request.Operation{ Name: opListSubscriptions, @@ -521,6 +1046,23 @@ func (c *SNS) ListSubscriptions(input *ListSubscriptionsInput) (*ListSubscriptio return out, err } +// ListSubscriptionsPages iterates over the pages of a ListSubscriptions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSubscriptions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSubscriptions operation. +// pageNum := 0 +// err := client.ListSubscriptionsPages(params, +// func(page *ListSubscriptionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *ListSubscriptionsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListSubscriptionsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -531,7 +1073,28 @@ func (c *SNS) ListSubscriptionsPages(input *ListSubscriptionsInput, fn func(p *L const opListSubscriptionsByTopic = "ListSubscriptionsByTopic" -// ListSubscriptionsByTopicRequest generates a request for the ListSubscriptionsByTopic operation. +// ListSubscriptionsByTopicRequest generates a "aws/request.Request" representing the +// client's request for the ListSubscriptionsByTopic operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListSubscriptionsByTopic method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListSubscriptionsByTopicRequest method. 
+// req, resp := client.ListSubscriptionsByTopicRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ListSubscriptionsByTopicRequest(input *ListSubscriptionsByTopicInput) (req *request.Request, output *ListSubscriptionsByTopicOutput) { op := &request.Operation{ Name: opListSubscriptionsByTopic, @@ -565,6 +1128,23 @@ func (c *SNS) ListSubscriptionsByTopic(input *ListSubscriptionsByTopicInput) (*L return out, err } +// ListSubscriptionsByTopicPages iterates over the pages of a ListSubscriptionsByTopic operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListSubscriptionsByTopic method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListSubscriptionsByTopic operation. +// pageNum := 0 +// err := client.ListSubscriptionsByTopicPages(params, +// func(page *ListSubscriptionsByTopicOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput, fn func(p *ListSubscriptionsByTopicOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListSubscriptionsByTopicRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -575,7 +1155,28 @@ func (c *SNS) ListSubscriptionsByTopicPages(input *ListSubscriptionsByTopicInput const opListTopics = "ListTopics" -// ListTopicsRequest generates a request for the ListTopics operation. +// ListTopicsRequest generates a "aws/request.Request" representing the +// client's request for the ListTopics operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTopics method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTopicsRequest method. +// req, resp := client.ListTopicsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) ListTopicsRequest(input *ListTopicsInput) (req *request.Request, output *ListTopicsOutput) { op := &request.Operation{ Name: opListTopics, @@ -608,6 +1209,23 @@ func (c *SNS) ListTopics(input *ListTopicsInput) (*ListTopicsOutput, error) { return out, err } +// ListTopicsPages iterates over the pages of a ListTopics operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListTopics method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListTopics operation. 
+// pageNum := 0 +// err := client.ListTopicsPages(params, +// func(page *ListTopicsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutput, lastPage bool) (shouldContinue bool)) error { page, _ := c.ListTopicsRequest(input) page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) @@ -616,9 +1234,81 @@ func (c *SNS) ListTopicsPages(input *ListTopicsInput, fn func(p *ListTopicsOutpu }) } +const opOptInPhoneNumber = "OptInPhoneNumber" + +// OptInPhoneNumberRequest generates a "aws/request.Request" representing the +// client's request for the OptInPhoneNumber operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the OptInPhoneNumber method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the OptInPhoneNumberRequest method. +// req, resp := client.OptInPhoneNumberRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) OptInPhoneNumberRequest(input *OptInPhoneNumberInput) (req *request.Request, output *OptInPhoneNumberOutput) { + op := &request.Operation{ + Name: opOptInPhoneNumber, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &OptInPhoneNumberInput{} + } + + req = c.newRequest(op, input, output) + output = &OptInPhoneNumberOutput{} + req.Data = output + return +} + +// Use this request to opt in a phone number that is opted out, which enables +// you to resume sending SMS messages to the number. +// +// You can opt in a phone number only once every 30 days. +func (c *SNS) OptInPhoneNumber(input *OptInPhoneNumberInput) (*OptInPhoneNumberOutput, error) { + req, out := c.OptInPhoneNumberRequest(input) + err := req.Send() + return out, err +} + const opPublish = "Publish" -// PublishRequest generates a request for the Publish operation. +// PublishRequest generates a "aws/request.Request" representing the +// client's request for the Publish operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Publish method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PublishRequest method. 
+// req, resp := client.PublishRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output *PublishOutput) { op := &request.Operation{ Name: opPublish, @@ -639,13 +1329,16 @@ func (c *SNS) PublishRequest(input *PublishInput) (req *request.Request, output // Sends a message to all of a topic's subscribed endpoints. When a messageId // is returned, the message has been saved and Amazon SNS will attempt to deliver // it to the topic's subscribers shortly. The format of the outgoing message -// to each subscribed endpoint depends on the notification protocol selected. +// to each subscribed endpoint depends on the notification protocol. // // To use the Publish action for sending a message to a mobile endpoint, such -// as an app on a Kindle device or mobile phone, you must specify the EndpointArn. -// The EndpointArn is returned when making a call with the CreatePlatformEndpoint -// action. The second example below shows a request and response for publishing -// to a mobile endpoint. +// as an app on a Kindle device or mobile phone, you must specify the EndpointArn +// for the TargetArn parameter. The EndpointArn is returned when making a call +// with the CreatePlatformEndpoint action. The second example below shows a +// request and response for publishing to a mobile endpoint. +// +// For more information about formatting messages, see Send Custom Platform-Specific +// Payloads in Messages to Mobile Devices (http://docs.aws.amazon.com/sns/latest/dg/mobile-push-send-custommessage.html). func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { req, out := c.PublishRequest(input) err := req.Send() @@ -654,7 +1347,28 @@ func (c *SNS) Publish(input *PublishInput) (*PublishOutput, error) { const opRemovePermission = "RemovePermission" -// RemovePermissionRequest generates a request for the RemovePermission operation. +// RemovePermissionRequest generates a "aws/request.Request" representing the +// client's request for the RemovePermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemovePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemovePermissionRequest method. +// req, resp := client.RemovePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { op := &request.Operation{ Name: opRemovePermission, @@ -683,7 +1397,28 @@ func (c *SNS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionO const opSetEndpointAttributes = "SetEndpointAttributes" -// SetEndpointAttributesRequest generates a request for the SetEndpointAttributes operation. +// SetEndpointAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetEndpointAttributes operation. 
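A minimal sketch of the Publish operation documented above, reusing the svc client from the earlier sketch; the topic ARN, subject, and message text are placeholder assumptions:

    // Hypothetical topic ARN; substitute a real one.
    topicARN := "arn:aws:sns:us-east-1:123456789012:example-topic"

    result, err := svc.Publish(&sns.PublishInput{
    	TopicArn: aws.String(topicARN),
    	Subject:  aws.String("Example subject"),
    	Message:  aws.String("Hello from the Publish sketch"),
    })
    if err != nil {
    	log.Fatal(err)
    }
    // MessageId is populated once Amazon SNS has accepted the message for delivery.
    fmt.Println(aws.StringValue(result.MessageId))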
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetEndpointAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetEndpointAttributesRequest method. +// req, resp := client.SetEndpointAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) SetEndpointAttributesRequest(input *SetEndpointAttributesInput) (req *request.Request, output *SetEndpointAttributesOutput) { op := &request.Operation{ Name: opSetEndpointAttributes, @@ -714,7 +1449,28 @@ func (c *SNS) SetEndpointAttributes(input *SetEndpointAttributesInput) (*SetEndp const opSetPlatformApplicationAttributes = "SetPlatformApplicationAttributes" -// SetPlatformApplicationAttributesRequest generates a request for the SetPlatformApplicationAttributes operation. +// SetPlatformApplicationAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetPlatformApplicationAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetPlatformApplicationAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetPlatformApplicationAttributesRequest method. +// req, resp := client.SetPlatformApplicationAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicationAttributesInput) (req *request.Request, output *SetPlatformApplicationAttributesOutput) { op := &request.Operation{ Name: opSetPlatformApplicationAttributes, @@ -737,15 +1493,92 @@ func (c *SNS) SetPlatformApplicationAttributesRequest(input *SetPlatformApplicat // Sets the attributes of the platform application object for the supported // push notification services, such as APNS and GCM. For more information, see // Using Amazon SNS Mobile Push Notifications (http://docs.aws.amazon.com/sns/latest/dg/SNSMobilePush.html). +// For information on configuring attributes for message delivery status, see +// Using Amazon SNS Application Attributes for Message Delivery Status (http://docs.aws.amazon.com/sns/latest/dg/sns-msg-status.html). 
func (c *SNS) SetPlatformApplicationAttributes(input *SetPlatformApplicationAttributesInput) (*SetPlatformApplicationAttributesOutput, error) { req, out := c.SetPlatformApplicationAttributesRequest(input) err := req.Send() return out, err } +const opSetSMSAttributes = "SetSMSAttributes" + +// SetSMSAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetSMSAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetSMSAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetSMSAttributesRequest method. +// req, resp := client.SetSMSAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *SNS) SetSMSAttributesRequest(input *SetSMSAttributesInput) (req *request.Request, output *SetSMSAttributesOutput) { + op := &request.Operation{ + Name: opSetSMSAttributes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSMSAttributesInput{} + } + + req = c.newRequest(op, input, output) + output = &SetSMSAttributesOutput{} + req.Data = output + return +} + +// Use this request to set the default settings for sending SMS messages and +// receiving daily SMS usage reports. +// +// You can override some of these settings for a single message when you use +// the Publish action with the MessageAttributes.entry.N parameter. For more +// information, see Sending an SMS Message (http://docs.aws.amazon.com/sns/latest/dg/sms_publish-to-phone.html) +// in the Amazon SNS Developer Guide. +func (c *SNS) SetSMSAttributes(input *SetSMSAttributesInput) (*SetSMSAttributesOutput, error) { + req, out := c.SetSMSAttributesRequest(input) + err := req.Send() + return out, err +} + const opSetSubscriptionAttributes = "SetSubscriptionAttributes" -// SetSubscriptionAttributesRequest generates a request for the SetSubscriptionAttributes operation. +// SetSubscriptionAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetSubscriptionAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetSubscriptionAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetSubscriptionAttributesRequest method. 
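A minimal sketch of the SetSMSAttributes call documented above, again reusing the svc client from the earlier sketch; the attribute names come from the doc comment, while the values chosen here are assumptions:

    _, err := svc.SetSMSAttributes(&sns.SetSMSAttributesInput{
    	Attributes: map[string]*string{
    		// Default to transactional delivery and cap monthly SMS spend at 5 USD.
    		"DefaultSMSType":    aws.String("Transactional"),
    		"MonthlySpendLimit": aws.String("5"),
    	},
    })
    if err != nil {
    	log.Fatal(err)
    }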
+// req, resp := client.SetSubscriptionAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) SetSubscriptionAttributesRequest(input *SetSubscriptionAttributesInput) (req *request.Request, output *SetSubscriptionAttributesOutput) { op := &request.Operation{ Name: opSetSubscriptionAttributes, @@ -774,7 +1607,28 @@ func (c *SNS) SetSubscriptionAttributes(input *SetSubscriptionAttributesInput) ( const opSetTopicAttributes = "SetTopicAttributes" -// SetTopicAttributesRequest generates a request for the SetTopicAttributes operation. +// SetTopicAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetTopicAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetTopicAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SetTopicAttributesRequest method. +// req, resp := client.SetTopicAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) SetTopicAttributesRequest(input *SetTopicAttributesInput) (req *request.Request, output *SetTopicAttributesOutput) { op := &request.Operation{ Name: opSetTopicAttributes, @@ -803,7 +1657,28 @@ func (c *SNS) SetTopicAttributes(input *SetTopicAttributesInput) (*SetTopicAttri const opSubscribe = "Subscribe" -// SubscribeRequest generates a request for the Subscribe operation. +// SubscribeRequest generates a "aws/request.Request" representing the +// client's request for the Subscribe operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Subscribe method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SubscribeRequest method. +// req, resp := client.SubscribeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) SubscribeRequest(input *SubscribeInput) (req *request.Request, output *SubscribeOutput) { op := &request.Operation{ Name: opSubscribe, @@ -833,7 +1708,28 @@ func (c *SNS) Subscribe(input *SubscribeInput) (*SubscribeOutput, error) { const opUnsubscribe = "Unsubscribe" -// UnsubscribeRequest generates a request for the Unsubscribe operation. +// UnsubscribeRequest generates a "aws/request.Request" representing the +// client's request for the Unsubscribe operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. 
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the Unsubscribe method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UnsubscribeRequest method. +// req, resp := client.UnsubscribeRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SNS) UnsubscribeRequest(input *UnsubscribeInput) (req *request.Request, output *UnsubscribeOutput) { op := &request.Operation{ Name: opUnsubscribe, @@ -931,6 +1827,61 @@ func (s AddPermissionOutput) GoString() string { return s.String() } +// The input for the CheckIfPhoneNumberIsOptedOut action. +type CheckIfPhoneNumberIsOptedOutInput struct { + _ struct{} `type:"structure"` + + // The phone number for which you want to check the opt out status. + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` +} + +// String returns the string representation +func (s CheckIfPhoneNumberIsOptedOutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckIfPhoneNumberIsOptedOutInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CheckIfPhoneNumberIsOptedOutInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CheckIfPhoneNumberIsOptedOutInput"} + if s.PhoneNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response from the CheckIfPhoneNumberIsOptedOut action. +type CheckIfPhoneNumberIsOptedOutOutput struct { + _ struct{} `type:"structure"` + + // Indicates whether the phone number is opted out: + // + // true – The phone number is opted out, meaning you cannot publish SMS messages + // to it. + // + // false – The phone number is opted in, meaning you can publish SMS messages + // to it. + IsOptedOut *bool `locationName:"isOptedOut" type:"boolean"` +} + +// String returns the string representation +func (s CheckIfPhoneNumberIsOptedOutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CheckIfPhoneNumberIsOptedOutOutput) GoString() string { + return s.String() +} + // Input for ConfirmSubscription action. type ConfirmSubscriptionInput struct { _ struct{} `type:"structure"` @@ -1368,15 +2319,18 @@ type GetEndpointAttributesOutput struct { // Attributes include the following: // - // CustomUserData -- arbitrary user data to associate with the endpoint. - // Amazon SNS does not use this data. The data must be in UTF-8 format and less - // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. - // Amazon SNS will set this to false when a notification service indicates to - // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically - // after updating Token. Token -- device token, also referred to as a registration - // id, for an app and mobile device. This is returned from the notification - // service when an app and mobile device are registered with the notification - // service. 
+ // CustomUserData -- arbitrary user data to associate with the endpoint. Amazon + // SNS does not use this data. The data must be in UTF-8 format and less than + // 2KB. + // + // Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS + // will set this to false when a notification service indicates to Amazon SNS + // that the endpoint is invalid. Users can set it back to true, typically after + // updating Token. + // + // Token -- device token, also referred to as a registration id, for an app + // and mobile device. This is returned from the notification service when an + // app and mobile device are registered with the notification service. Attributes map[string]*string `type:"map"` } @@ -1427,13 +2381,18 @@ type GetPlatformApplicationAttributesOutput struct { // Attributes include the following: // - // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications - // should be sent. EventEndpointDeleted -- Topic ARN to which EndpointDeleted - // event notifications should be sent. EventEndpointUpdated -- Topic ARN to - // which EndpointUpdate event notifications should be sent. EventDeliveryFailure - // -- Topic ARN to which DeliveryFailure event notifications should be sent - // upon Direct Publish delivery failure (permanent) to one of the application's - // endpoints. + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. + // + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. + // + // EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications + // should be sent. + // + // EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications + // should be sent upon Direct Publish delivery failure (permanent) to one of + // the application's endpoints. Attributes map[string]*string `type:"map"` } @@ -1447,6 +2406,47 @@ func (s GetPlatformApplicationAttributesOutput) GoString() string { return s.String() } +// The input for the GetSMSAttributes request. +type GetSMSAttributesInput struct { + _ struct{} `type:"structure"` + + // A list of the individual attribute names, such as MonthlySpendLimit, for + // which you want values. + // + // For all attribute names, see SetSMSAttributes (http://docs.aws.amazon.com/sns/latest/api/API_SetSMSAttributes.html). + // + // If you don't use this parameter, Amazon SNS returns all SMS attributes. + Attributes []*string `locationName:"attributes" type:"list"` +} + +// String returns the string representation +func (s GetSMSAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSMSAttributesInput) GoString() string { + return s.String() +} + +// The response from the GetSMSAttributes request. +type GetSMSAttributesOutput struct { + _ struct{} `type:"structure"` + + // The SMS attribute names and their values. + Attributes map[string]*string `locationName:"attributes" type:"map"` +} + +// String returns the string representation +func (s GetSMSAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetSMSAttributesOutput) GoString() string { + return s.String() +} + // Input for GetSubscriptionAttributes. type GetSubscriptionAttributesInput struct { _ struct{} `type:"structure"` @@ -1485,13 +2485,21 @@ type GetSubscriptionAttributesOutput struct { // A map of the subscription's attributes. 
Attributes in this map include the // following: // - // SubscriptionArn -- the subscription's ARN TopicArn -- the topic ARN that - // the subscription is associated with Owner -- the AWS account ID of the subscription's - // owner ConfirmationWasAuthenticated -- true if the subscription confirmation - // request was authenticated DeliveryPolicy -- the JSON serialization of the - // subscription's delivery policy EffectiveDeliveryPolicy -- the JSON serialization - // of the effective delivery policy that takes into account the topic delivery - // policy and account system defaults + // SubscriptionArn -- the subscription's ARN + // + // TopicArn -- the topic ARN that the subscription is associated with + // + // Owner -- the AWS account ID of the subscription's owner + // + // ConfirmationWasAuthenticated -- true if the subscription confirmation + // request was authenticated + // + // DeliveryPolicy -- the JSON serialization of the subscription's delivery + // policy + // + // EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery + // policy that takes into account the topic delivery policy and account system + // defaults Attributes map[string]*string `type:"map"` } @@ -1542,16 +2550,27 @@ type GetTopicAttributesOutput struct { // A map of the topic's attributes. Attributes in this map include the following: // - // TopicArn -- the topic's ARN Owner -- the AWS account ID of the topic's - // owner Policy -- the JSON serialization of the topic's access control policy - // DisplayName -- the human-readable name used in the "From" field for notifications - // to email and email-json endpoints SubscriptionsPending -- the number of - // subscriptions pending confirmation on this topic SubscriptionsConfirmed - // -- the number of confirmed subscriptions on this topic SubscriptionsDeleted - // -- the number of deleted subscriptions on this topic DeliveryPolicy -- the - // JSON serialization of the topic's delivery policy EffectiveDeliveryPolicy - // -- the JSON serialization of the effective delivery policy that takes into - // account system defaults + // TopicArn -- the topic's ARN + // + // Owner -- the AWS account ID of the topic's owner + // + // Policy -- the JSON serialization of the topic's access control policy + // + // DisplayName -- the human-readable name used in the "From" field for notifications + // to email and email-json endpoints + // + // SubscriptionsPending -- the number of subscriptions pending confirmation + // on this topic + // + // SubscriptionsConfirmed -- the number of confirmed subscriptions on this + // topic + // + // SubscriptionsDeleted -- the number of deleted subscriptions on this topic + // + // DeliveryPolicy -- the JSON serialization of the topic's delivery policy + // + // EffectiveDeliveryPolicy -- the JSON serialization of the effective delivery + // policy that takes into account system defaults Attributes map[string]*string `type:"map"` } @@ -1623,6 +2642,49 @@ func (s ListEndpointsByPlatformApplicationOutput) GoString() string { return s.String() } +// The input for the ListPhoneNumbersOptedOut action. +type ListPhoneNumbersOptedOutInput struct { + _ struct{} `type:"structure"` + + // A NextToken string is used when you call the ListPhoneNumbersOptedOut action + // to retrieve additional records that are available after the first page of + // results. 
+ NextToken *string `locationName:"nextToken" type:"string"` +} + +// String returns the string representation +func (s ListPhoneNumbersOptedOutInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPhoneNumbersOptedOutInput) GoString() string { + return s.String() +} + +// The response from the ListPhoneNumbersOptedOut action. +type ListPhoneNumbersOptedOutOutput struct { + _ struct{} `type:"structure"` + + // A NextToken string is returned when you call the ListPhoneNumbersOptedOut + // action if additional records are available after the first page of results. + NextToken *string `locationName:"nextToken" type:"string"` + + // A list of phone numbers that are opted out of receiving SMS messages. The + // list is paginated, and each page can contain up to 100 phone numbers. + PhoneNumbers []*string `locationName:"phoneNumbers" type:"list"` +} + +// String returns the string representation +func (s ListPhoneNumbersOptedOutOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPhoneNumbersOptedOutOutput) GoString() string { + return s.String() +} + // Input for ListPlatformApplications action. type ListPlatformApplicationsInput struct { _ struct{} `type:"structure"` @@ -1849,6 +2911,52 @@ func (s *MessageAttributeValue) Validate() error { return nil } +// Input for the OptInPhoneNumber action. +type OptInPhoneNumberInput struct { + _ struct{} `type:"structure"` + + // The phone number to opt in. + PhoneNumber *string `locationName:"phoneNumber" type:"string" required:"true"` +} + +// String returns the string representation +func (s OptInPhoneNumberInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptInPhoneNumberInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *OptInPhoneNumberInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "OptInPhoneNumberInput"} + if s.PhoneNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PhoneNumber")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response for the OptInPhoneNumber action. +type OptInPhoneNumberOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s OptInPhoneNumberOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptInPhoneNumberOutput) GoString() string { + return s.String() +} + // Platform application object. type PlatformApplication struct { _ struct{} `type:"structure"` @@ -1887,17 +2995,30 @@ type PublishInput struct { // Constraints: Messages must be UTF-8 encoded strings at most 256 KB in size // (262144 bytes, not 262144 characters). // - // JSON-specific constraints: Keys in the JSON object that correspond to supported - // transport protocols must have simple JSON string values. The values will - // be parsed (unescaped) before they are used in outgoing messages. Outbound - // notifications are JSON encoded (meaning that the characters will be reescaped - // for sending). Values have a minimum length of 0 (the empty string, "", is - // allowed). Values have a maximum length bounded by the overall message size - // (so, including multiple protocols may limit message sizes). Non-string values - // will cause the key to be ignored. 
Keys that do not correspond to supported - // transport protocols are ignored. Duplicate keys are not allowed. Failure - // to parse or validate any key or value in the message will cause the Publish - // call to return an error (no partial delivery). + // JSON-specific constraints: + // + // Keys in the JSON object that correspond to supported transport protocols + // must have simple JSON string values. + // + // The values will be parsed (unescaped) before they are used in outgoing + // messages. + // + // Outbound notifications are JSON encoded (meaning that the characters will + // be reescaped for sending). + // + // Values have a minimum length of 0 (the empty string, "", is allowed). + // + // Values have a maximum length bounded by the overall message size (so, + // including multiple protocols may limit message sizes). + // + // Non-string values will cause the key to be ignored. + // + // Keys that do not correspond to supported transport protocols are ignored. + // + // Duplicate keys are not allowed. + // + // Failure to parse or validate any key or value in the message will cause + // the Publish call to return an error (no partial delivery). Message *string `type:"string" required:"true"` // Message attributes for Publish action. @@ -1908,10 +3029,13 @@ type PublishInput struct { // message to your SMS subscribers and a longer message to your email subscribers. // If you set MessageStructure to json, the value of the Message parameter must: // - // be a syntactically valid JSON object; and contain at least a top-level - // JSON key of "default" with a value that is a string. You can define other - // top-level keys that define the message you want to send to a specific transport - // protocol (e.g., "http"). + // be a syntactically valid JSON object; and + // + // contain at least a top-level JSON key of "default" with a value that is + // a string. + // + // You can define other top-level keys that define the message you want + // to send to a specific transport protocol (e.g., "http"). // // For information about sending different messages for each protocol using // the AWS Management Console, go to Create Different Messages for Each Protocol @@ -1921,6 +3045,12 @@ type PublishInput struct { // Valid value: json MessageStructure *string `type:"string"` + // The phone number to which you want to deliver an SMS message. Use E.164 format. + // + // If you don't specify a value for the PhoneNumber parameter, you must specify + // a value for the TargetArn or TopicArn parameters. + PhoneNumber *string `type:"string"` + // Optional parameter to be used as the "Subject" line when the message is delivered // to email endpoints. This field will also be included, if present, in the // standard JSON messages delivered to other endpoints. @@ -1931,9 +3061,15 @@ type PublishInput struct { Subject *string `type:"string"` // Either TopicArn or EndpointArn, but not both. + // + // If you don't specify a value for the TargetArn parameter, you must specify + // a value for the PhoneNumber or TopicArn parameters. TargetArn *string `type:"string"` // The topic you want to publish to. + // + // If you don't specify a value for the TopicArn parameter, you must specify + // a value for the PhoneNumber or TargetArn parameters. TopicArn *string `type:"string"` } @@ -2047,15 +3183,18 @@ type SetEndpointAttributesInput struct { // A map of the endpoint attributes. Attributes in this map include the following: // - // CustomUserData -- arbitrary user data to associate with the endpoint. 
- // Amazon SNS does not use this data. The data must be in UTF-8 format and less - // than 2KB. Enabled -- flag that enables/disables delivery to the endpoint. - // Amazon SNS will set this to false when a notification service indicates to - // Amazon SNS that the endpoint is invalid. Users can set it back to true, typically - // after updating Token. Token -- device token, also referred to as a registration - // id, for an app and mobile device. This is returned from the notification - // service when an app and mobile device are registered with the notification - // service. + // CustomUserData -- arbitrary user data to associate with the endpoint. Amazon + // SNS does not use this data. The data must be in UTF-8 format and less than + // 2KB. + // + // Enabled -- flag that enables/disables delivery to the endpoint. Amazon SNS + // will set this to false when a notification service indicates to Amazon SNS + // that the endpoint is invalid. Users can set it back to true, typically after + // updating Token. + // + // Token -- device token, also referred to as a registration id, for an app + // and mobile device. This is returned from the notification service when an + // app and mobile device are registered with the notification service. Attributes map[string]*string `type:"map" required:"true"` // EndpointArn used for SetEndpointAttributes action. @@ -2109,18 +3248,35 @@ type SetPlatformApplicationAttributesInput struct { // A map of the platform application attributes. Attributes in this map include // the following: // - // PlatformCredential -- The credential received from the notification service. - // For APNS/APNS_SANDBOX, PlatformCredential is "private key". For GCM, PlatformCredential - // is "API key". For ADM, PlatformCredential is "client secret". PlatformPrincipal - // -- The principal received from the notification service. For APNS/APNS_SANDBOX, - // PlatformPrincipal is "SSL certificate". For GCM, PlatformPrincipal is not - // applicable. For ADM, PlatformPrincipal is "client id". EventEndpointCreated - // -- Topic ARN to which EndpointCreated event notifications should be sent. - // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications - // should be sent. EventEndpointUpdated -- Topic ARN to which EndpointUpdate - // event notifications should be sent. EventDeliveryFailure -- Topic ARN to - // which DeliveryFailure event notifications should be sent upon Direct Publish - // delivery failure (permanent) to one of the application's endpoints. + // PlatformCredential -- The credential received from the notification service. + // For APNS/APNS_SANDBOX, PlatformCredential is private key. For GCM, PlatformCredential + // is "API key". For ADM, PlatformCredential is "client secret". + // + // PlatformPrincipal -- The principal received from the notification service. + // For APNS/APNS_SANDBOX, PlatformPrincipal is SSL certificate. For GCM, PlatformPrincipal + // is not applicable. For ADM, PlatformPrincipal is "client id". + // + // EventEndpointCreated -- Topic ARN to which EndpointCreated event notifications + // should be sent. + // + // EventEndpointDeleted -- Topic ARN to which EndpointDeleted event notifications + // should be sent. + // + // EventEndpointUpdated -- Topic ARN to which EndpointUpdate event notifications + // should be sent. + // + // EventDeliveryFailure -- Topic ARN to which DeliveryFailure event notifications + // should be sent upon Direct Publish delivery failure (permanent) to one of + // the application's endpoints. 
+ // + // SuccessFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access + // to use CloudWatch Logs on your behalf. + // + // FailureFeedbackRoleArn -- IAM role ARN used to give Amazon SNS write access + // to use CloudWatch Logs on your behalf. + // + // SuccessFeedbackSampleRate -- Sample rate percentage (0-100) of successfully + // delivered messages. Attributes map[string]*string `type:"map" required:"true"` // PlatformApplicationArn for SetPlatformApplicationAttributes action. @@ -2167,6 +3323,119 @@ func (s SetPlatformApplicationAttributesOutput) GoString() string { return s.String() } +// The input for the SetSMSAttributes action. +type SetSMSAttributesInput struct { + _ struct{} `type:"structure"` + + // The default settings for sending SMS messages from your account. You can + // set values for the following attribute names: + // + // MonthlySpendLimit – The maximum amount in USD that you are willing to spend + // each month to send SMS messages. When Amazon SNS determines that sending + // an SMS message would incur a cost that exceeds this limit, it stops sending + // SMS messages within minutes. + // + // Amazon SNS stops sending SMS messages within minutes of the limit being + // crossed. During that interval, if you continue to send SMS messages, you + // will incur costs that exceed your limit. + // + // DeliveryStatusIAMRole – The ARN of the IAM role that allows Amazon SNS + // to write logs about SMS deliveries in CloudWatch Logs. For each SMS message + // that you send, Amazon SNS writes a log that includes the message price, the + // success or failure status, the reason for failure (if the message failed), + // the message dwell time, and other information. + // + // DeliveryStatusSuccessSamplingRate – The percentage of successful SMS deliveries + // for which Amazon SNS will write logs in CloudWatch Logs. The value can be + // an integer from 0 - 100. For example, to write logs only for failed deliveries, + // set this value to 0. To write logs for 10% of your successful deliveries, + // set it to 10. + // + // DefaultSenderID – A string, such as your business brand, that is displayed + // as the sender on the receiving device. Support for sender IDs varies by country. + // The sender ID can be 1 - 11 alphanumeric characters, and it must contain + // at least one letter. + // + // DefaultSMSType – The type of SMS message that you will send by default. + // You can assign the following values: + // + // Promotional – Noncritical messages, such as marketing messages. Amazon + // SNS optimizes the message delivery to incur the lowest cost. + // + // Transactional – (Default) Critical messages that support customer transactions, + // such as one-time passcodes for multi-factor authentication. Amazon SNS optimizes + // the message delivery to achieve the highest reliability. + // + // UsageReportS3Bucket – The name of the Amazon S3 bucket to receive daily + // SMS usage reports from Amazon SNS. Each day, Amazon SNS will deliver a usage + // report as a CSV file to the bucket. 
The report includes the following information + // for each SMS message that was successfully delivered by your account: + // + // Time that the message was published (in UTC) + // + // Message ID + // + // Destination phone number + // + // Message type + // + // Delivery status + // + // Message price (in USD) + // + // Part number (a message is split into multiple parts if it is too long + // for a single message) + // + // Total number of parts + // + // To receive the report, the bucket must have a policy that allows the Amazon + // SNS service principle to perform the s3:PutObject and s3:GetBucketLocation + // actions. + // + // For an example bucket policy and usage report, see Viewing Statistics About + // SMS Message Delivery (http://docs.aws.amazon.com/sns/latest/dg/sms_stats.html) + // in the Amazon SNS Developer Guide. + Attributes map[string]*string `locationName:"attributes" type:"map" required:"true"` +} + +// String returns the string representation +func (s SetSMSAttributesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSMSAttributesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSMSAttributesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "SetSMSAttributesInput"} + if s.Attributes == nil { + invalidParams.Add(request.NewErrParamRequired("Attributes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The response for the SetSMSAttributes action. +type SetSMSAttributesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSMSAttributesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SetSMSAttributesOutput) GoString() string { + return s.String() +} + // Input for SetSubscriptionAttributes action. type SetSubscriptionAttributesInput struct { _ struct{} `type:"structure"` @@ -2287,23 +3556,43 @@ type SubscribeInput struct { // The endpoint that you want to receive notifications. Endpoints vary by protocol: // - // For the http protocol, the endpoint is an URL beginning with "http://" - // For the https protocol, the endpoint is a URL beginning with "https://" For - // the email protocol, the endpoint is an email address For the email-json protocol, - // the endpoint is an email address For the sms protocol, the endpoint is a - // phone number of an SMS-enabled device For the sqs protocol, the endpoint - // is the ARN of an Amazon SQS queue For the application protocol, the endpoint - // is the EndpointArn of a mobile app and device. + // For the http protocol, the endpoint is an URL beginning with "http://" + // + // For the https protocol, the endpoint is a URL beginning with "https://" + // + // For the email protocol, the endpoint is an email address + // + // For the email-json protocol, the endpoint is an email address + // + // For the sms protocol, the endpoint is a phone number of an SMS-enabled + // device + // + // For the sqs protocol, the endpoint is the ARN of an Amazon SQS queue + // + // For the application protocol, the endpoint is the EndpointArn of a mobile + // app and device. + // + // For the lambda protocol, the endpoint is the ARN of an AWS Lambda function. Endpoint *string `type:"string"` // The protocol you want to use. 
Supported protocols include: // - // http -- delivery of JSON-encoded message via HTTP POST https -- delivery - // of JSON-encoded message via HTTPS POST email -- delivery of message via - // SMTP email-json -- delivery of JSON-encoded message via SMTP sms -- delivery - // of message via SMS sqs -- delivery of JSON-encoded message to an Amazon - // SQS queue application -- delivery of JSON-encoded message to an EndpointArn - // for a mobile app and device. + // http -- delivery of JSON-encoded message via HTTP POST + // + // https -- delivery of JSON-encoded message via HTTPS POST + // + // email -- delivery of message via SMTP + // + // email-json -- delivery of JSON-encoded message via SMTP + // + // sms -- delivery of message via SMS + // + // sqs -- delivery of JSON-encoded message to an Amazon SQS queue + // + // application -- delivery of JSON-encoded message to an EndpointArn for + // a mobile app and device. + // + // lambda -- delivery of JSON-encoded message to an AWS Lambda function. Protocol *string `type:"string" required:"true"` // The ARN of the topic you want to subscribe to. diff --git a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go index 7d87839b3..75ecb0ac5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sns/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sns/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Amazon Simple Notification Service (Amazon SNS) is a web service that enables @@ -70,7 +70,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go index 5d3fea658..9b416bdab 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/api.go @@ -14,7 +14,28 @@ import ( const opAddPermission = "AddPermission" -// AddPermissionRequest generates a request for the AddPermission operation. +// AddPermissionRequest generates a "aws/request.Request" representing the +// client's request for the AddPermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddPermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddPermissionRequest method. 
+// req, resp := client.AddPermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) AddPermissionRequest(input *AddPermissionInput) (req *request.Request, output *AddPermissionOutput) { op := &request.Operation{ Name: opAddPermission, @@ -58,7 +79,28 @@ func (c *SQS) AddPermission(input *AddPermissionInput) (*AddPermissionOutput, er const opChangeMessageVisibility = "ChangeMessageVisibility" -// ChangeMessageVisibilityRequest generates a request for the ChangeMessageVisibility operation. +// ChangeMessageVisibilityRequest generates a "aws/request.Request" representing the +// client's request for the ChangeMessageVisibility operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeMessageVisibility method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeMessageVisibilityRequest method. +// req, resp := client.ChangeMessageVisibilityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput) (req *request.Request, output *ChangeMessageVisibilityOutput) { op := &request.Operation{ Name: opChangeMessageVisibility, @@ -103,12 +145,14 @@ func (c *SQS) ChangeMessageVisibilityRequest(input *ChangeMessageVisibilityInput // // If you attempt to set the VisibilityTimeout to an amount more than the maximum // time left, Amazon SQS returns an error. It will not automatically recalculate -// and increase the timeout to the maximum time remaining. Unlike with a queue, -// when you change the visibility timeout for a specific message, that timeout -// value is applied immediately but is not saved in memory for that message. -// If you don't delete a message after it is received, the visibility timeout -// for the message the next time it is received reverts to the original timeout -// value, not the value you set with the ChangeMessageVisibility action. +// and increase the timeout to the maximum time remaining. +// +// Unlike with a queue, when you change the visibility timeout for a specific +// message, that timeout value is applied immediately but is not saved in memory +// for that message. If you don't delete a message after it is received, the +// visibility timeout for the message the next time it is received reverts to +// the original timeout value, not the value you set with the ChangeMessageVisibility +// action. func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*ChangeMessageVisibilityOutput, error) { req, out := c.ChangeMessageVisibilityRequest(input) err := req.Send() @@ -117,7 +161,28 @@ func (c *SQS) ChangeMessageVisibility(input *ChangeMessageVisibilityInput) (*Cha const opChangeMessageVisibilityBatch = "ChangeMessageVisibilityBatch" -// ChangeMessageVisibilityBatchRequest generates a request for the ChangeMessageVisibilityBatch operation. 
+// ChangeMessageVisibilityBatchRequest generates a "aws/request.Request" representing the +// client's request for the ChangeMessageVisibilityBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ChangeMessageVisibilityBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ChangeMessageVisibilityBatchRequest method. +// req, resp := client.ChangeMessageVisibilityBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibilityBatchInput) (req *request.Request, output *ChangeMessageVisibilityBatchOutput) { op := &request.Operation{ Name: opChangeMessageVisibilityBatch, @@ -142,10 +207,11 @@ func (c *SQS) ChangeMessageVisibilityBatchRequest(input *ChangeMessageVisibility // // Because the batch request can result in a combination of successful and // unsuccessful actions, you should check for batch errors even when the call -// returns an HTTP status code of 200. Some API actions take lists of parameters. -// These lists are specified using the param.n notation. Values of n are integers -// starting from 1. For example, a parameter list with two elements looks like -// this: +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchInput) (*ChangeMessageVisibilityBatchOutput, error) { req, out := c.ChangeMessageVisibilityBatchRequest(input) err := req.Send() @@ -154,7 +220,28 @@ func (c *SQS) ChangeMessageVisibilityBatch(input *ChangeMessageVisibilityBatchIn const opCreateQueue = "CreateQueue" -// CreateQueueRequest generates a request for the CreateQueue operation. +// CreateQueueRequest generates a "aws/request.Request" representing the +// client's request for the CreateQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateQueueRequest method. 
+// req, resp := client.CreateQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, output *CreateQueueOutput) { op := &request.Operation{ Name: opCreateQueue, @@ -182,7 +269,7 @@ func (c *SQS) CreateQueueRequest(input *CreateQueueInput) (req *request.Request, // // You may pass one or more attributes in the request. If you do not provide // a value for any attribute, the queue will have the default value for that -// attribute. Permitted attributes are the same that can be set using SetQueueAttributes. +// attribute. // // Use GetQueueUrl to get a queue's URL. GetQueueUrl requires only the QueueName // parameter. @@ -203,7 +290,28 @@ func (c *SQS) CreateQueue(input *CreateQueueInput) (*CreateQueueOutput, error) { const opDeleteMessage = "DeleteMessage" -// DeleteMessageRequest generates a request for the DeleteMessage operation. +// DeleteMessageRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMessageRequest method. +// req, resp := client.DeleteMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) DeleteMessageRequest(input *DeleteMessageInput) (req *request.Request, output *DeleteMessageOutput) { op := &request.Operation{ Name: opDeleteMessage, @@ -251,7 +359,28 @@ func (c *SQS) DeleteMessage(input *DeleteMessageInput) (*DeleteMessageOutput, er const opDeleteMessageBatch = "DeleteMessageBatch" -// DeleteMessageBatchRequest generates a request for the DeleteMessageBatch operation. +// DeleteMessageBatchRequest generates a "aws/request.Request" representing the +// client's request for the DeleteMessageBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteMessageBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteMessageBatchRequest method. 
+// req, resp := client.DeleteMessageBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) DeleteMessageBatchRequest(input *DeleteMessageBatchInput) (req *request.Request, output *DeleteMessageBatchOutput) { op := &request.Operation{ Name: opDeleteMessageBatch, @@ -288,7 +417,28 @@ func (c *SQS) DeleteMessageBatch(input *DeleteMessageBatchInput) (*DeleteMessage const opDeleteQueue = "DeleteQueue" -// DeleteQueueRequest generates a request for the DeleteQueue operation. +// DeleteQueueRequest generates a "aws/request.Request" representing the +// client's request for the DeleteQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteQueueRequest method. +// req, resp := client.DeleteQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) DeleteQueueRequest(input *DeleteQueueInput) (req *request.Request, output *DeleteQueueOutput) { op := &request.Operation{ Name: opDeleteQueue, @@ -333,7 +483,28 @@ func (c *SQS) DeleteQueue(input *DeleteQueueInput) (*DeleteQueueOutput, error) { const opGetQueueAttributes = "GetQueueAttributes" -// GetQueueAttributesRequest generates a request for the GetQueueAttributes operation. +// GetQueueAttributesRequest generates a "aws/request.Request" representing the +// client's request for the GetQueueAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetQueueAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetQueueAttributesRequest method. +// req, resp := client.GetQueueAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *request.Request, output *GetQueueAttributesOutput) { op := &request.Operation{ Name: opGetQueueAttributes, @@ -351,39 +522,11 @@ func (c *SQS) GetQueueAttributesRequest(input *GetQueueAttributesInput) (req *re return } -// Gets attributes for the specified queue. The following attributes are supported: -// All - returns all values. ApproximateNumberOfMessages - returns the approximate -// number of visible messages in a queue. 
For more information, see Resources -// Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) -// in the Amazon SQS Developer Guide. ApproximateNumberOfMessagesNotVisible -// - returns the approximate number of messages that are not timed-out and not -// deleted. For more information, see Resources Required to Process Messages -// (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) -// in the Amazon SQS Developer Guide. VisibilityTimeout - returns the visibility -// timeout for the queue. For more information about visibility timeout, see -// Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) -// in the Amazon SQS Developer Guide. CreatedTimestamp - returns the time when -// the queue was created (epoch time in seconds). LastModifiedTimestamp - returns -// the time when the queue was last changed (epoch time in seconds). Policy -// - returns the queue's policy. MaximumMessageSize - returns the limit of how -// many bytes a message can contain before Amazon SQS rejects it. MessageRetentionPeriod -// - returns the number of seconds Amazon SQS retains a message. QueueArn - -// returns the queue's Amazon resource name (ARN). ApproximateNumberOfMessagesDelayed -// - returns the approximate number of messages that are pending to be added -// to the queue. DelaySeconds - returns the default delay on the queue in seconds. -// ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage -// call will wait for a message to arrive. RedrivePolicy - returns the parameters -// for dead letter queue functionality of the source queue. For more information -// about RedrivePolicy and dead letter queues, see Using Amazon SQS Dead Letter -// Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) -// in the Amazon SQS Developer Guide. +// Gets attributes for the specified queue. // -// Going forward, new attributes might be added. If you are writing code that -// calls this action, we recommend that you structure your code so that it can -// handle new attributes gracefully. Some API actions take lists of parameters. -// These lists are specified using the param.n notation. Values of n are integers -// starting from 1. For example, a parameter list with two elements looks like -// this: +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttributesOutput, error) { req, out := c.GetQueueAttributesRequest(input) err := req.Send() @@ -392,7 +535,28 @@ func (c *SQS) GetQueueAttributes(input *GetQueueAttributesInput) (*GetQueueAttri const opGetQueueUrl = "GetQueueUrl" -// GetQueueUrlRequest generates a request for the GetQueueUrl operation. +// GetQueueUrlRequest generates a "aws/request.Request" representing the +// client's request for the GetQueueUrl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the GetQueueUrl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetQueueUrlRequest method. +// req, resp := client.GetQueueUrlRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) GetQueueUrlRequest(input *GetQueueUrlInput) (req *request.Request, output *GetQueueUrlOutput) { op := &request.Operation{ Name: opGetQueueUrl, @@ -426,7 +590,28 @@ func (c *SQS) GetQueueUrl(input *GetQueueUrlInput) (*GetQueueUrlOutput, error) { const opListDeadLetterSourceQueues = "ListDeadLetterSourceQueues" -// ListDeadLetterSourceQueuesRequest generates a request for the ListDeadLetterSourceQueues operation. +// ListDeadLetterSourceQueuesRequest generates a "aws/request.Request" representing the +// client's request for the ListDeadLetterSourceQueues operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListDeadLetterSourceQueues method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListDeadLetterSourceQueuesRequest method. +// req, resp := client.ListDeadLetterSourceQueuesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) ListDeadLetterSourceQueuesRequest(input *ListDeadLetterSourceQueuesInput) (req *request.Request, output *ListDeadLetterSourceQueuesOutput) { op := &request.Operation{ Name: opListDeadLetterSourceQueues, @@ -457,7 +642,28 @@ func (c *SQS) ListDeadLetterSourceQueues(input *ListDeadLetterSourceQueuesInput) const opListQueues = "ListQueues" -// ListQueuesRequest generates a request for the ListQueues operation. +// ListQueuesRequest generates a "aws/request.Request" representing the +// client's request for the ListQueues operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListQueues method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListQueuesRequest method. 
+// req, resp := client.ListQueuesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) ListQueuesRequest(input *ListQueuesInput) (req *request.Request, output *ListQueuesOutput) { op := &request.Operation{ Name: opListQueues, @@ -486,7 +692,28 @@ func (c *SQS) ListQueues(input *ListQueuesInput) (*ListQueuesOutput, error) { const opPurgeQueue = "PurgeQueue" -// PurgeQueueRequest generates a request for the PurgeQueue operation. +// PurgeQueueRequest generates a "aws/request.Request" representing the +// client's request for the PurgeQueue operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PurgeQueue method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PurgeQueueRequest method. +// req, resp := client.PurgeQueueRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, output *PurgeQueueOutput) { op := &request.Operation{ Name: opPurgeQueue, @@ -509,12 +736,13 @@ func (c *SQS) PurgeQueueRequest(input *PurgeQueueInput) (req *request.Request, o // Deletes the messages in a queue specified by the queue URL. // // When you use the PurgeQueue API, the deleted messages in the queue cannot -// be retrieved. When you purge a queue, the message deletion process takes -// up to 60 seconds. All messages sent to the queue before calling PurgeQueue -// will be deleted; messages sent to the queue while it is being purged may -// be deleted. While the queue is being purged, messages sent to the queue before -// PurgeQueue was called may be received, but will be deleted within the next -// minute. +// be retrieved. +// +// When you purge a queue, the message deletion process takes up to 60 seconds. +// All messages sent to the queue before calling PurgeQueue will be deleted; +// messages sent to the queue while it is being purged may be deleted. While +// the queue is being purged, messages sent to the queue before PurgeQueue was +// called may be received, but will be deleted within the next minute. func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { req, out := c.PurgeQueueRequest(input) err := req.Send() @@ -523,7 +751,28 @@ func (c *SQS) PurgeQueue(input *PurgeQueueInput) (*PurgeQueueOutput, error) { const opReceiveMessage = "ReceiveMessage" -// ReceiveMessageRequest generates a request for the ReceiveMessage operation. +// ReceiveMessageRequest generates a "aws/request.Request" representing the +// client's request for the ReceiveMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the ReceiveMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ReceiveMessageRequest method. +// req, resp := client.ReceiveMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) ReceiveMessageRequest(input *ReceiveMessageInput) (req *request.Request, output *ReceiveMessageOutput) { op := &request.Operation{ Name: opReceiveMessage, @@ -591,7 +840,28 @@ func (c *SQS) ReceiveMessage(input *ReceiveMessageInput) (*ReceiveMessageOutput, const opRemovePermission = "RemovePermission" -// RemovePermissionRequest generates a request for the RemovePermission operation. +// RemovePermissionRequest generates a "aws/request.Request" representing the +// client's request for the RemovePermission operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemovePermission method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemovePermissionRequest method. +// req, resp := client.RemovePermissionRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) RemovePermissionRequest(input *RemovePermissionInput) (req *request.Request, output *RemovePermissionOutput) { op := &request.Operation{ Name: opRemovePermission, @@ -621,7 +891,28 @@ func (c *SQS) RemovePermission(input *RemovePermissionInput) (*RemovePermissionO const opSendMessage = "SendMessage" -// SendMessageRequest generates a request for the SendMessage operation. +// SendMessageRequest generates a "aws/request.Request" representing the +// client's request for the SendMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendMessageRequest method. +// req, resp := client.SendMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) SendMessageRequest(input *SendMessageInput) (req *request.Request, output *SendMessageOutput) { op := &request.Operation{ Name: opSendMessage, @@ -659,7 +950,28 @@ func (c *SQS) SendMessage(input *SendMessageInput) (*SendMessageOutput, error) { const opSendMessageBatch = "SendMessageBatch" -// SendMessageBatchRequest generates a request for the SendMessageBatch operation. 
+// SendMessageBatchRequest generates a "aws/request.Request" representing the +// client's request for the SendMessageBatch operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SendMessageBatch method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the SendMessageBatchRequest method. +// req, resp := client.SendMessageBatchRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *request.Request, output *SendMessageBatchOutput) { op := &request.Operation{ Name: opSendMessageBatch, @@ -692,15 +1004,17 @@ func (c *SQS) SendMessageBatchRequest(input *SendMessageBatchInput) (req *reques // your message, according to the W3C XML specification. For more information, // go to http://www.faqs.org/rfcs/rfc1321.html (http://www.faqs.org/rfcs/rfc1321.html). // If you send any characters that are not included in the list, your request -// will be rejected. #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] -// | [#x10000 to #x10FFFF] +// will be rejected. +// +// #x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] | [#x10000 to #x10FFFF] // // Because the batch request can result in a combination of successful and // unsuccessful actions, you should check for batch errors even when the call -// returns an HTTP status code of 200. Some API actions take lists of parameters. -// These lists are specified using the param.n notation. Values of n are integers -// starting from 1. For example, a parameter list with two elements looks like -// this: +// returns an HTTP status code of 200. +// +// Some API actions take lists of parameters. These lists are specified using +// the param.n notation. Values of n are integers starting from 1. For example, +// a parameter list with two elements looks like this: func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchOutput, error) { req, out := c.SendMessageBatchRequest(input) err := req.Send() @@ -709,7 +1023,28 @@ func (c *SQS) SendMessageBatch(input *SendMessageBatchInput) (*SendMessageBatchO const opSetQueueAttributes = "SetQueueAttributes" -// SetQueueAttributesRequest generates a request for the SetQueueAttributes operation. +// SetQueueAttributesRequest generates a "aws/request.Request" representing the +// client's request for the SetQueueAttributes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the SetQueueAttributes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the SetQueueAttributesRequest method. +// req, resp := client.SetQueueAttributesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *SQS) SetQueueAttributesRequest(input *SetQueueAttributesInput) (req *request.Request, output *SetQueueAttributesOutput) { op := &request.Operation{ Name: opSetQueueAttributes, @@ -770,6 +1105,8 @@ type AddPermissionInput struct { Label *string `type:"string" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -855,6 +1192,8 @@ type ChangeMessageVisibilityBatchInput struct { Entries []*ChangeMessageVisibilityBatchRequestEntry `locationNameList:"ChangeMessageVisibilityBatchRequestEntry" type:"list" flattened:"true" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -990,6 +1329,8 @@ type ChangeMessageVisibilityInput struct { _ struct{} `type:"structure"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` // The receipt handle associated with the message whose visibility timeout should @@ -1052,26 +1393,44 @@ type CreateQueueInput struct { // The following lists the names, descriptions, and values of the special request // parameters the CreateQueue action uses: // - // DelaySeconds - The time in seconds that the delivery of all messages in + // DelaySeconds - The time in seconds that the delivery of all messages in // the queue will be delayed. An integer from 0 to 900 (15 minutes). The default - // for this attribute is 0 (zero). MaximumMessageSize - The limit of how many - // bytes a message can contain before Amazon SQS rejects it. An integer from - // 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute - // is 262144 (256 KiB). MessageRetentionPeriod - The number of seconds Amazon - // SQS retains a message. Integer representing seconds, from 60 (1 minute) to - // 1209600 (14 days). The default for this attribute is 345600 (4 days). Policy - // - The queue's policy. A valid AWS policy. For more information about policy - // structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Amazon IAM User Guide. ReceiveMessageWaitTimeSeconds - The time for - // which a ReceiveMessage call will wait for a message to arrive. An integer - // from 0 to 20 (seconds). The default for this attribute is 0. VisibilityTimeout - // - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). - // The default for this attribute is 30. For more information about visibility - // timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // for this attribute is 0 (zero). + // + // MaximumMessageSize - The limit of how many bytes a message can contain before + // Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes + // (256 KiB). The default for this attribute is 262144 (256 KiB). + // + // MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. + // Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The + // default for this attribute is 345600 (4 days). + // + // Policy - The queue's policy. 
A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. + // + // ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call + // will wait for a message to arrive. An integer from 0 to 20 (seconds). The + // default for this attribute is 0. + // + // RedrivePolicy - The parameters for dead letter queue functionality of the + // source queue. For more information about RedrivePolicy and dead letter queues, + // see Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) // in the Amazon SQS Developer Guide. + // + // VisibilityTimeout - The visibility timeout for the queue. An integer from + // 0 to 43200 (12 hours). The default for this attribute is 30. For more information + // about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + // + // Any other valid special request parameters that are specified (such as + // ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored. Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` // The name for the queue to be created. + // + // Queue names are case-sensitive. QueueName *string `type:"string" required:"true"` } @@ -1123,6 +1482,8 @@ type DeleteMessageBatchInput struct { Entries []*DeleteMessageBatchRequestEntry `locationNameList:"DeleteMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1246,6 +1607,8 @@ type DeleteMessageInput struct { _ struct{} `type:"structure"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` // The receipt handle associated with the message to delete. @@ -1296,6 +1659,8 @@ type DeleteQueueInput struct { _ struct{} `type:"structure"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1339,10 +1704,62 @@ func (s DeleteQueueOutput) GoString() string { type GetQueueAttributesInput struct { _ struct{} `type:"structure"` - // A list of attributes to retrieve information for. + // A list of attributes to retrieve information for. The following attributes + // are supported: + // + // All - returns all values. + // + // ApproximateNumberOfMessages - returns the approximate number of visible + // messages in a queue. For more information, see Resources Required to Process + // Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) + // in the Amazon SQS Developer Guide. + // + // ApproximateNumberOfMessagesNotVisible - returns the approximate number of + // messages that are not timed-out and not deleted. For more information, see + // Resources Required to Process Messages (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/ApproximateNumber.html) + // in the Amazon SQS Developer Guide. 
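[Editor's note, not part of the patch: the CreateQueueInput attribute documentation expanded above passes every special request parameter as a name-to-string entry in the Attributes map. A minimal sketch under that assumption; the queue name, region, and attribute values are placeholders, not taken from the patch.]

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sqs"
)

func main() {
	svc := sqs.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	// Each attribute is supplied as a string value, per the CreateQueueInput documentation above.
	out, err := svc.CreateQueue(&sqs.CreateQueueInput{
		QueueName: aws.String("example-queue"), // queue names are case-sensitive
		Attributes: map[string]*string{
			"DelaySeconds":      aws.String("0"),
			"VisibilityTimeout": aws.String("60"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created queue:", *out.QueueUrl)
}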
+ // + // VisibilityTimeout - returns the visibility timeout for the queue. For more + // information about visibility timeout, see Visibility Timeout (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html) + // in the Amazon SQS Developer Guide. + // + // CreatedTimestamp - returns the time when the queue was created (epoch time + // in seconds). + // + // LastModifiedTimestamp - returns the time when the queue was last changed + // (epoch time in seconds). + // + // Policy - returns the queue's policy. + // + // MaximumMessageSize - returns the limit of how many bytes a message can contain + // before Amazon SQS rejects it. + // + // MessageRetentionPeriod - returns the number of seconds Amazon SQS retains + // a message. + // + // QueueArn - returns the queue's Amazon resource name (ARN). + // + // ApproximateNumberOfMessagesDelayed - returns the approximate number of messages + // that are pending to be added to the queue. + // + // DelaySeconds - returns the default delay on the queue in seconds. + // + // ReceiveMessageWaitTimeSeconds - returns the time for which a ReceiveMessage + // call will wait for a message to arrive. + // + // RedrivePolicy - returns the parameters for dead letter queue functionality + // of the source queue. For more information about RedrivePolicy and dead letter + // queues, see Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) + // in the Amazon SQS Developer Guide. + // + // Going forward, new attributes might be added. If you are writing code that + // calls this action, we recommend that you structure your code so that it can + // handle new attributes gracefully. AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1392,6 +1809,8 @@ type GetQueueUrlInput struct { // The name of the queue whose URL must be fetched. Maximum 80 characters; alphanumeric // characters, hyphens (-), and underscores (_) are allowed. + // + // Queue names are case-sensitive. QueueName *string `type:"string" required:"true"` // The AWS account ID of the account that created the queue. @@ -1444,6 +1863,8 @@ type ListDeadLetterSourceQueuesInput struct { _ struct{} `type:"structure"` // The queue URL of a dead letter queue. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1494,6 +1915,8 @@ type ListQueuesInput struct { // A string to use for filtering the list results. Only those queues whose name // begins with the specified string are returned. + // + // Queue names are case-sensitive. QueueNamePrefix *string `type:"string"` } @@ -1592,8 +2015,10 @@ type MessageAttributeValue struct { BinaryValue []byte `type:"blob"` // Amazon SQS supports the following logical data types: String, Number, and - // Binary. In addition, you can append your own custom labels. For more information, - // see Message Attribute Data Types (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributes.DataTypes). + // Binary. For the Number data type, you must use StringValue. + // + // You can also append custom labels. 
For more information, see Message Attribute + // Data Types (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html#SQSMessageAttributes.DataTypes). DataType *string `type:"string" required:"true"` // Not implemented. Reserved for future use. @@ -1632,6 +2057,8 @@ type PurgeQueueInput struct { // The queue URL of the queue to delete the messages from when using the PurgeQueue // API. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1675,18 +2102,28 @@ func (s PurgeQueueOutput) GoString() string { type ReceiveMessageInput struct { _ struct{} `type:"structure"` - // A list of attributes that need to be returned along with each message. + // A list of attributes that need to be returned along with each message. These + // attributes include: // - // The following lists the names and descriptions of the attributes that can - // be returned: + // All - returns all values. + // + // ApproximateFirstReceiveTimestamp - returns the time when the message was + // first received from the queue (epoch time in milliseconds). // - // All - returns all values. ApproximateFirstReceiveTimestamp - returns the - // time when the message was first received from the queue (epoch time in milliseconds). // ApproximateReceiveCount - returns the number of times a message has been - // received from the queue but not deleted. SenderId - returns the AWS account - // number (or the IP address, if anonymous access is allowed) of the sender. - // SentTimestamp - returns the time when the message was sent to the queue (epoch - // time in milliseconds). + // received from the queue but not deleted. + // + // SenderId - returns the AWS account number (or the IP address, if anonymous + // access is allowed) of the sender. + // + // SentTimestamp - returns the time when the message was sent to the queue + // (epoch time in milliseconds). + // + // Any other valid special request parameters that are specified (such as + // ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // CreatedTimestamp, DelaySeconds, LastModifiedTimestamp, MaximumMessageSize, + // MessageRetentionPeriod, Policy, QueueArn, ReceiveMessageWaitTimeSeconds, + // RedrivePolicy, and VisibilityTimeout) will be ignored. AttributeNames []*string `locationNameList:"AttributeName" type:"list" flattened:"true"` // The maximum number of messages to return. Amazon SQS never returns more messages @@ -1707,11 +2144,13 @@ type ReceiveMessageInput struct { // // When using ReceiveMessage, you can send a list of attribute names to receive, // or you can return all of the attributes by specifying "All" or ".*" in your - // request. You can also use "foo.*" to return all message attributes starting - // with the "foo" prefix. + // request. You can also use "bar.*" to return all message attributes starting + // with the "bar" prefix. MessageAttributeNames []*string `locationNameList:"MessageAttributeName" type:"list" flattened:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` // The duration (in seconds) that the received messages are hidden from subsequent @@ -1773,6 +2212,8 @@ type RemovePermissionInput struct { Label *string `type:"string" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. 
QueueUrl *string `type:"string" required:"true"` } @@ -1823,6 +2264,8 @@ type SendMessageBatchInput struct { Entries []*SendMessageBatchRequestEntry `locationNameList:"SendMessageBatchRequestEntry" type:"list" flattened:"true" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -1993,6 +2436,8 @@ type SendMessageInput struct { MessageBody *string `type:"string" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } @@ -2072,28 +2517,43 @@ type SetQueueAttributesInput struct { // The following lists the names, descriptions, and values of the special request // parameters the SetQueueAttributes action uses: // - // DelaySeconds - The time in seconds that the delivery of all messages in + // DelaySeconds - The time in seconds that the delivery of all messages in // the queue will be delayed. An integer from 0 to 900 (15 minutes). The default - // for this attribute is 0 (zero). MaximumMessageSize - The limit of how many - // bytes a message can contain before Amazon SQS rejects it. An integer from - // 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute - // is 262144 (256 KiB). MessageRetentionPeriod - The number of seconds Amazon - // SQS retains a message. Integer representing seconds, from 60 (1 minute) to - // 1209600 (14 days). The default for this attribute is 345600 (4 days). Policy - // - The queue's policy. A valid AWS policy. For more information about policy - // structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) - // in the Amazon IAM User Guide. ReceiveMessageWaitTimeSeconds - The time for - // which a ReceiveMessage call will wait for a message to arrive. An integer - // from 0 to 20 (seconds). The default for this attribute is 0. VisibilityTimeout - // - The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). - // The default for this attribute is 30. For more information about visibility - // timeout, see Visibility Timeout in the Amazon SQS Developer Guide. RedrivePolicy - // - The parameters for dead letter queue functionality of the source queue. - // For more information about RedrivePolicy and dead letter queues, see Using - // Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide. + // for this attribute is 0 (zero). + // + // MaximumMessageSize - The limit of how many bytes a message can contain before + // Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes + // (256 KiB). The default for this attribute is 262144 (256 KiB). + // + // MessageRetentionPeriod - The number of seconds Amazon SQS retains a message. + // Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The + // default for this attribute is 345600 (4 days). + // + // Policy - The queue's policy. A valid AWS policy. For more information about + // policy structure, see Overview of AWS IAM Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PoliciesOverview.html) + // in the Amazon IAM User Guide. + // + // ReceiveMessageWaitTimeSeconds - The time for which a ReceiveMessage call + // will wait for a message to arrive. An integer from 0 to 20 (seconds). The + // default for this attribute is 0. + // + // VisibilityTimeout - The visibility timeout for the queue. An integer from + // 0 to 43200 (12 hours). 
The default for this attribute is 30. For more information + // about visibility timeout, see Visibility Timeout in the Amazon SQS Developer + // Guide. + // + // RedrivePolicy - The parameters for dead letter queue functionality of the + // source queue. For more information about RedrivePolicy and dead letter queues, + // see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide. + // + // Any other valid special request parameters that are specified (such as + // ApproximateNumberOfMessages, ApproximateNumberOfMessagesDelayed, ApproximateNumberOfMessagesNotVisible, + // CreatedTimestamp, LastModifiedTimestamp, and QueueArn) will be ignored. Attributes map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true" required:"true"` // The URL of the Amazon SQS queue to take action on. + // + // Queue URLs are case-sensitive. QueueUrl *string `type:"string" required:"true"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go index ef0c3d014..5dd17c4d9 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/checksums.go @@ -71,9 +71,19 @@ func verifyReceiveMessage(r *request.Request) { if r.DataFilled() && r.ParamsFilled() { ids := []string{} out := r.Data.(*ReceiveMessageOutput) - for _, msg := range out.Messages { + for i, msg := range out.Messages { err := checksumsMatch(msg.Body, msg.MD5OfBody) if err != nil { + if msg.MessageId == nil { + if r.Config.Logger != nil { + r.Config.Logger.Log(fmt.Sprintf( + "WARN: SQS.ReceiveMessage failed checksum request id: %s, message %d has no message ID.", + r.RequestID, i, + )) + } + continue + } + ids = append(ids, *msg.MessageId) } } diff --git a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go index dca9d855c..aaf456caa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sqs/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // Welcome to the Amazon Simple Queue Service API Reference. This section describes @@ -20,22 +20,31 @@ import ( // between distributed components of your applications that perform different // tasks without losing messages or requiring each component to be always available. 
// -// Helpful Links: Current WSDL (2012-11-05) (http://queue.amazonaws.com/doc/2012-11-05/QueueService.wsdl) +// Helpful Links: +// +// Current WSDL (2012-11-05) (http://queue.amazonaws.com/doc/2012-11-05/QueueService.wsdl) +// // Making API Requests (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/MakingRequestsArticle.html) -// Amazon SQS product page (http://aws.amazon.com/sqs/) Using Amazon SQS Message -// Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html) +// +// Amazon SQS product page (http://aws.amazon.com/sqs/) +// +// Using Amazon SQS Message Attributes (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSMessageAttributes.html) +// // Using Amazon SQS Dead Letter Queues (http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html) +// // Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sqs_region) // -// -// We also provide SDKs that enable you to access Amazon SQS from your preferred +// We also provide SDKs that enable you to access Amazon SQS from your preferred // programming language. The SDKs contain functionality that automatically takes // care of tasks such as: // -// Cryptographically signing your service requests Retrying requests Handling -// error responses +// Cryptographically signing your service requests // -// For a list of available SDKs, go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). +// Retrying requests +// +// Handling error responses +// +// For a list of available SDKs, go to Tools for Amazon Web Services (http://aws.amazon.com/tools/). //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. type SQS struct { @@ -82,7 +91,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 35a098166..5e4078ea8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -12,7 +12,28 @@ import ( const opAssumeRole = "AssumeRole" -// AssumeRoleRequest generates a request for the AssumeRole operation. +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. 
+// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { op := &request.Operation{ Name: opAssumeRole, @@ -40,8 +61,8 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o // in the IAM User Guide. // // Important: You cannot call AssumeRole by using AWS root account credentials; -// access is denied. You must use IAM user credentials or temporary security -// credentials to call AssumeRole. +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. // // For cross-account access, imagine that you own multiple accounts and need // to access resources in each account. You could create long-term credentials @@ -127,7 +148,28 @@ func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { const opAssumeRoleWithSAML = "AssumeRoleWithSAML" -// AssumeRoleWithSAMLRequest generates a request for the AssumeRoleWithSAML operation. +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { op := &request.Operation{ Name: opAssumeRoleWithSAML, @@ -219,7 +261,28 @@ func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWit const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" -// AssumeRoleWithWebIdentityRequest generates a request for the AssumeRoleWithWebIdentity operation. +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. 
+// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { op := &request.Operation{ Name: opAssumeRoleWithWebIdentity, @@ -330,7 +393,28 @@ func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) ( const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" -// DecodeAuthorizationMessageRequest generates a request for the DecodeAuthorizationMessage operation. +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { op := &request.Operation{ Name: opDecodeAuthorizationMessage, @@ -388,7 +472,28 @@ func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) const opGetCallerIdentity = "GetCallerIdentity" -// GetCallerIdentityRequest generates a request for the GetCallerIdentity operation. +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. +// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { op := &request.Operation{ Name: opGetCallerIdentity, @@ -416,7 +521,28 @@ func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdenti const opGetFederationToken = "GetFederationToken" -// GetFederationTokenRequest generates a request for the GetFederationToken operation. 
+// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. +// req, resp := client.GetFederationTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) { op := &request.Operation{ Name: opGetFederationToken, @@ -520,7 +646,28 @@ func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederation const opGetSessionToken = "GetSessionToken" -// GetSessionTokenRequest generates a request for the GetSessionToken operation. +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. 
+// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { op := &request.Operation{ Name: opGetSessionToken, diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go index fbe3cff33..c938e6ca1 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go @@ -7,8 +7,8 @@ import ( "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/client/metadata" "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" "github.com/aws/aws-sdk-go/private/protocol/query" - "github.com/aws/aws-sdk-go/private/signer/v4" ) // The AWS Security Token Service (STS) is a web service that enables you to @@ -102,7 +102,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio } // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) svc.Handlers.Build.PushBackNamed(query.BuildHandler) svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler) svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler) diff --git a/vendor/vendor.json b/vendor/vendor.json index 7712dc914..931fedb09 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -275,125 +275,183 @@ "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" }, { + "checksumSHA1": "zrKMMpGfvfCUU07ydetOaOKum5U=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "AWg3FBA1NTPdIVZipaQf/rGx38o=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "EiauD48zRlXIFvAENgZ+PXSEnT0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": 
"2016-07-13T21:13:24Z" }, { + "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", + "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" + }, + { + "checksumSHA1": "svFeyM3oQkk0nfQ0pguDjMgV2M4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "46SVikiXo5xuy/CS6mM1XVTUU7w=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "0HzXzMByDLiJSqrMEqbg5URAx0o=", + "path": "github.com/aws/aws-sdk-go/aws/signer/v4", + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" + }, + { + "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "G1he3uSmd1h8ZRnKOIWuDrWp2zQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "gHqZ41fSrCEUftkImHKGW+cKxFk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "MPzz1x/qt6f2R/JW6aELbm/qT4k=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "nHHyS4+VgZOV7F3Xu87crArmbds=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" 
+ "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "ayzKZc+f+OrjOtE2bz4+lrlKR7c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "ttxyyPnlmMDqX+sY10BwbwwA+jo=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "LsCIsjbzX2r3n/AhpNJvAC5ueNA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "333fcdc9874ea63fbdb3176e12ffa04b5ec44f5a", - "revisionTime": "2016-07-05T22:03:21Z" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { "comment": "v1.1.23", @@ -401,196 +459,274 @@ "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" }, { + "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "lD48Br3S98XvKfKID0QiTbBgC1M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "HMNQSV7Om3yvNiougcTrfZVJFbE=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "4deSd9La3EF2Cmq+tD5rcvhfTGQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "eCFTaV9GKqv/UEzwRgFFUaFz098=", "comment": "v1.1.23", "path": 
"github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "b9W5mR0lazSwYV6Pl8HNslokIpo=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "mWNJKpt18ASs9/RhnIjILcsGlng=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "Q6xeArbCzOunYsn2tFyTA5LN1Cg=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "p5a/DcdUvhTx0PCRR+/CRXk9g6c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "p9BTPHO+J8OdzK2btdcGGAaTmhk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "t1fZO+x4OG6e7T8HIi2Yr2wR9D4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "y+pZPK8hcTDwq1zHuRduWE14flw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "gqlYKqMKCuQ3fzNTyDw6jiG1sCs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "IEHq+VLH1fud1oQ4MXj1nqfpgUY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "sHPoLMWXO5tM63ipuxVXduuRypI=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "1vOgFGxLhjNe6BK3RJaV1OqisCs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "rjSScNzMTvEHv7Lk5KcxDpNU5EE=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": 
"2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "RZF1yHtJhAqaMwbeAM/6BdLLavk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "TAuizMIsvgeuZhmGTYPA7LOXHvY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { - "checksumSHA1": "hvaMQ+Wb+SWjYhT6gSAx6DCks2Y=", + "checksumSHA1": "B/g+Usd8rImjgUpVPLyNTL0LaUQ=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011", - "revisionTime": "2016-05-03T21:45:29Z" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "1c9xsISLQWKSrORIpdokCCWCe2M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "bvVmHWxCOk0Cmw333zQ5jutPCZQ=", "comment": "v1.1.15", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "TtIAgZ+evpkKB5bBYCB69k0wZoU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "B1EtgBrv//gYqA+Sp6a/SK2zLO4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "kXJ9ycLAIj0PFSFbfrA/LR/hIi8=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "2n5/m0ClE4OyQRNdjfLwg+nSY3o=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "/cFX1/Gr6M+r9232gLIV+4np7Po=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "jM0EhAIybh0fyLHxrmVSmG3JLmU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "aLwDFgrPzIBidURxso1ujcr2pDs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": 
"2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "w0aQAtZ42oGeVOqwwG15OBGoU1s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "mgImZ/bluUOY9GpQ/oAnscIXwrA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "6ejP+X+O9e6y40GICj9Vcn1MuBY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "68YN+UopWOSISIcQQ6zSVbyaDzQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" + }, + { + "checksumSHA1": "X9g/Vdq939ijN2gcumwOyYfHM2U=", + "path": "github.com/aws/aws-sdk-go/service/ses", + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "333fcdc9874ea63fbdb3176e12ffa04b5ec44f5a", - "revisionTime": "2016-07-05T22:03:21Z" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "+ic7vevBfganFLENR29pJaEf4Tw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "oLAlquYlQzgYFS9ochS/iQ9+uXY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { + "checksumSHA1": "6a2WM0r/rXUxFjxH73jYL88LBSw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", + "revisionTime": "2016-07-13T21:13:24Z" }, { "path": "github.com/bgentry/speakeasy", From 411ad21fd89be66c0a987eb0d2bdb98a795ea3c7 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Thu, 14 Jul 2016 15:29:31 +0000 Subject: [PATCH 0290/1238] provider/openstack: Fixing boot volume interference This commit fixes the situation where instances with both a bootable volume and attached block storage volumes were reporting an inconsistent state. 
--- .../resource_openstack_compute_instance_v2.go | 25 ++++++++--- ...urce_openstack_compute_instance_v2_test.go | 41 +++++++++++++++++++ 2 files changed, 60 insertions(+), 6 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go index 7f1b8f9bc..4edd512e0 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -1436,12 +1436,25 @@ func getVolumeAttachments(computeClient *gophercloud.ServiceClient, d *schema.Re return err } - vols := make([]map[string]interface{}, len(attachments)) - for i, attachment := range attachments { - vols[i] = make(map[string]interface{}) - vols[i]["id"] = attachment.ID - vols[i]["volume_id"] = attachment.VolumeID - vols[i]["device"] = attachment.Device + var vols []map[string]interface{} + for _, attachment := range attachments { + // ignore the volume if it is attached as a root device + rootDevFound := false + for _, rootDev := range []string{"/dev/vda", "/dev/xda", "/dev/sda", "/dev/xvda"} { + if attachment.Device == rootDev { + rootDevFound = true + } + } + + if rootDevFound { + continue + } + + vol := make(map[string]interface{}) + vol["id"] = attachment.ID + vol["volume_id"] = attachment.VolumeID + vol["device"] = attachment.Device + vols = append(vols, vol) } log.Printf("[INFO] Volume attachments: %v", vols) d.Set("volume", vols) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go index cc87bf707..6e1f9c422 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go @@ -388,6 +388,47 @@ func TestAccComputeV2Instance_bootFromVolumeImage(t *testing.T) { }) } +func TestAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume(t *testing.T) { + var instance servers.Server + var testAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "volume_1" { + name = "volume_1" + size = 1 + } + + resource "openstack_compute_instance_v2" "instance_1" { + name = "instance_1" + security_groups = ["default"] + block_device { + uuid = "%s" + source_type = "image" + volume_size = 2 + boot_index = 0 + destination_type = "volume" + delete_on_termination = true + } + + volume { + volume_id = "${openstack_blockstorage_volume_v1.volume_1.id}" + } + }`, + os.Getenv("OS_IMAGE_ID")) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Instance_bootFromVolumeImageWithAttachedVolume, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.instance_1", &instance), + ), + }, + }, + }) +} + func TestAccComputeV2Instance_bootFromVolumeVolume(t *testing.T) { var instance servers.Server var testAccComputeV2Instance_bootFromVolumeVolume = fmt.Sprintf(` From ddffb47492d90f35ec12b49ab6ece98c8811aff7 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 09:49:07 -0600 Subject: [PATCH 0291/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d36b889a0..5bb6fad5b 100644 
--- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ IMPROVEMENTS: * core: The `jsonencode` interpolation function now supports encoding lists and maps [GH-6749] * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. [GH-6923] + * core: Support `.` in map keys [GH-7654] * provider/aws: Add `dns_name` to `aws_efs_mount_target` [GH-7428] * provider/aws: Add `option_settings` to `aws_db_option_group` [GH-6560] * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster [GH-6795] From 93832527b1bfd673c35c152d7206df7060704b44 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 11:24:58 -0600 Subject: [PATCH 0292/1238] provider/aws: Fixup skip_final_snapshot import Neither skip_final_snapshot nor final_snapshot_identifier can be fetched from any API call, so we need to default skip_final_snapshot to true during import so that final_snapshot_identifier is not required --- .../aws/import_aws_db_instance_test.go | 5 ++++- .../providers/aws/resource_aws_db_instance.go | 18 ++++++++++++------ 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/builtin/providers/aws/import_aws_db_instance_test.go b/builtin/providers/aws/import_aws_db_instance_test.go index 8079d117b..7af427fd8 100644 --- a/builtin/providers/aws/import_aws_db_instance_test.go +++ b/builtin/providers/aws/import_aws_db_instance_test.go @@ -23,7 +23,10 @@ func TestAccAWSDBInstance_importBasic(t *testing.T) { ImportState: true, ImportStateVerify: true, ImportStateVerifyIgnore: []string{ - "password", "skip_final_snapshot"}, + "password", + "skip_final_snapshot", + "final_snapshot_identifier", + }, }, }, }) diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 8dc6f115b..c7615161b 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -23,7 +23,7 @@ func resourceAwsDbInstance() *schema.Resource { Update: resourceAwsDbInstanceUpdate, Delete: resourceAwsDbInstanceDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceAwsDbInstanceImport, }, Schema: map[string]*schema.Schema{ @@ -757,11 +757,8 @@ func resourceAwsDbInstanceDelete(d *schema.ResourceData, meta interface{}) error opts := rds.DeleteDBInstanceInput{DBInstanceIdentifier: aws.String(d.Id())} - skipFinalSnapshot, exists := d.GetOk("skip_final_snapshot") - if !exists { - skipFinalSnapshot = true - } - opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot.(bool)) + skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) + opts.SkipFinalSnapshot = aws.Bool(skipFinalSnapshot) if skipFinalSnapshot == false { if name, present := d.GetOk("final_snapshot_identifier"); present { @@ -1023,6 +1020,15 @@ func resourceAwsDbInstanceRetrieve( return resp.DBInstances[0], nil } +func resourceAwsDbInstanceImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + func resourceAwsDbInstanceStateRefreshFunc( d *schema.ResourceData, meta interface{}) resource.StateRefreshFunc { return func() (interface{}, string, error) { From 81003fa6b10913dd02dbcb16a539c95e5bb3a182 Mon Sep 17 00:00:00 2001 From: 
Krzysztof Wilczynski Date: Sat, 16 Jul 2016 03:28:09 +0900 Subject: [PATCH 0293/1238] Fix icmp_type and icmp_code in aws_network_acl_rule. The ICMP type 0 (Echo Reply) was not handled correctly. This commit changes the type of attributes "icmp_type" and "icmp_code" from TypeInt to TypeString, allowing for the string value to be manually converted into an integer. This enables an integer values such as -1, 0, 8, etc., coming from the resource definition in the template to be handled correctly. Signed-off-by: Krzysztof Wilczynski --- .../aws/resource_aws_network_acl_rule.go | 21 ++++++++++++++----- .../aws/resource_aws_network_acl_rule_test.go | 14 +++++++++++-- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go index b27f908d2..be347daf3 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule.go @@ -63,12 +63,12 @@ func resourceAwsNetworkAclRule() *schema.Resource { ForceNew: true, }, "icmp_type": &schema.Schema{ - Type: schema.TypeInt, + Type: schema.TypeString, Optional: true, ForceNew: true, }, "icmp_code": &schema.Schema{ - Type: schema.TypeInt, + Type: schema.TypeString, Optional: true, ForceNew: true, }, @@ -103,14 +103,25 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e }, } - // Specify additional required fields for ICMP + // Specify additional required fields for ICMP. For the list + // of ICMP codes and types, see: http://www.nthelp.com/icmp.html if p == 1 { params.IcmpTypeCode = &ec2.IcmpTypeCode{} if v, ok := d.GetOk("icmp_code"); ok { - params.IcmpTypeCode.Code = aws.Int64(int64(v.(int))) + icmpCode, err := strconv.Atoi(v.(string)) + if err != nil { + return fmt.Errorf("Unable to parse ICMP code %s for rule %#v", v, d.Get("rule_number").(int)) + } + params.IcmpTypeCode.Code = aws.Int64(int64(icmpCode)) + log.Printf("[DEBUG] Transformed ICMP code %s into %d", v, icmpCode) } if v, ok := d.GetOk("icmp_type"); ok { - params.IcmpTypeCode.Type = aws.Int64(int64(v.(int))) + icmpType, err := strconv.Atoi(v.(string)) + if err != nil { + return fmt.Errorf("Unable to parse ICMP type %s for rule %#v", v, d.Get("rule_number").(int)) + } + params.IcmpTypeCode.Type = aws.Int64(int64(icmpType)) + log.Printf("[DEBUG] Transformed ICMP type %s into %d", v, icmpType) } } diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go index 56973b1d4..95682bf41 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go @@ -23,7 +23,8 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) { resource.TestStep{ Config: testAccAWSNetworkAclRuleBasicConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.bar", &networkAcl), + testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), + testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.quux", &networkAcl), ), }, }, @@ -112,7 +113,7 @@ resource "aws_vpc" "foo" { resource "aws_network_acl" "bar" { vpc_id = "${aws_vpc.foo.id}" } -resource "aws_network_acl_rule" "bar" { +resource "aws_network_acl_rule" "baz" { network_acl_id = "${aws_network_acl.bar.id}" rule_number = 200 egress = false @@ -122,4 +123,13 @@ resource "aws_network_acl_rule" "bar" { from_port = 22 to_port = 22 } +resource "aws_network_acl_rule" 
"quux" { + network_acl_id = "${aws_network_acl.bar.id}" + rule_number = 300 + protocol = "icmp" + rule_action = "allow" + cidr_block = "0.0.0.0/0" + icmp_type = 0 + icmp_code = -1 +} ` From bbe7fd261337f00cc4ebc4f1739c6b8f06241645 Mon Sep 17 00:00:00 2001 From: David Harris Date: Fri, 15 Jul 2016 13:03:42 -0600 Subject: [PATCH 0294/1238] provider/aws: Fix Elastic Beanstalk test (#7668) This fixes the `TestAccAWSBeanstalkEnv_tier` test. The instance profile needs access to send and receive messages from its sqs queue. Without these permissions Beanstalk returns an error event, causing the test to fail. --- ..._aws_elastic_beanstalk_environment_test.go | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go index 009560a20..eeb4657c4 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go @@ -323,6 +323,23 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" { ` const testAccBeanstalkWorkerEnvConfig = ` +resource "aws_iam_instance_profile" "tftest" { + name = "tftest_profile" + roles = ["${aws_iam_role.tftest.name}"] +} + +resource "aws_iam_role" "tftest" { + name = "tftest_role" + path = "/" + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Action\":\"sts:AssumeRole\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Effect\":\"Allow\",\"Sid\":\"\"}]}" +} + +resource "aws_iam_role_policy" "tftest" { + name = "tftest_policy" + role = "${aws_iam_role.tftest.id}" + policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"QueueAccess\",\"Action\":[\"sqs:ChangeMessageVisibility\",\"sqs:DeleteMessage\",\"sqs:ReceiveMessage\"],\"Effect\":\"Allow\",\"Resource\":\"*\"}]}" +} + resource "aws_elastic_beanstalk_application" "tftest" { name = "tf-test-name" description = "tf-test-desc" @@ -333,6 +350,12 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" { application = "${aws_elastic_beanstalk_application.tftest.name}" tier = "Worker" solution_stack_name = "64bit Amazon Linux running Python" + + setting { + namespace = "aws:autoscaling:launchconfiguration" + name = "IamInstanceProfile" + value = "${aws_iam_instance_profile.tftest.name}" + } } ` From 614806d59f0bca7131ff0706ce927e6cf9ba3421 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 13:15:47 -0600 Subject: [PATCH 0295/1238] helper/resource: Fix import test harness, which was modifying state Maps are reference types, it turns out :D --- helper/resource/testing_import_state.go | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/helper/resource/testing_import_state.go b/helper/resource/testing_import_state.go index f16f17130..00ec87324 100644 --- a/helper/resource/testing_import_state.go +++ b/helper/resource/testing_import_state.go @@ -90,8 +90,14 @@ func testStepImportState( } // Compare their attributes - actual := r.Primary.Attributes - expected := oldR.Primary.Attributes + actual := make(map[string]string) + for k, v := range r.Primary.Attributes { + actual[k] = v + } + expected := make(map[string]string) + for k, v := range oldR.Primary.Attributes { + expected[k] = v + } // Remove fields we're ignoring for _, v := range step.ImportStateVerifyIgnore { From a4c96e56196c2f1ce9e88f288be271f04515b9ee Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 14 Jul 2016 09:32:47 -0600 Subject: [PATCH 
0296/1238] dag: Fix graph printing type mismatch Dependencies were being sorted, but their types were not, causing incorrect printing. --- dag/graph.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/dag/graph.go b/dag/graph.go index b271339ba..012118057 100644 --- a/dag/graph.go +++ b/dag/graph.go @@ -202,16 +202,17 @@ func (g *Graph) StringWithNodeTypes() string { // Alphabetize dependencies deps := make([]string, 0, targets.Len()) - targetNodes := make([]Vertex, 0, targets.Len()) + targetNodes := make(map[string]Vertex) for _, target := range targets.List() { - deps = append(deps, VertexName(target)) - targetNodes = append(targetNodes, target) + dep := VertexName(target) + deps = append(deps, dep) + targetNodes[dep] = target } sort.Strings(deps) // Write dependencies - for i, d := range deps { - buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[i])) + for _, d := range deps { + buf.WriteString(fmt.Sprintf(" %s - %T\n", d, targetNodes[d])) } } From b45f53eef4c05473f8d630f8b6bd5cb9a4f8dd77 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 14 Jul 2016 09:33:37 -0600 Subject: [PATCH 0297/1238] dag: fix ReverseDepthFirstWalk when nodes remove themselves The report in #7378 led us into a deep rabbit hole that turned out to expose a bug in the graph walk implementation being used by the `NoopTransformer`. The problem ended up being when two nodes in a single dependency chain both reported `Noop() -> true` and needed to be removed. This was breaking the walk and preventing the second node from ever being visited. Fixes #7378 --- dag/dag.go | 12 ++++----- dag/dag_test.go | 27 +++++++++++++++++++ terraform/context_apply_test.go | 2 ++ .../middle/bottom/bottom.tf | 6 ++++- .../middle/middle.tf | 8 +++--- 5 files changed, 45 insertions(+), 10 deletions(-) diff --git a/dag/dag.go b/dag/dag.go index b609ac707..20b3e0400 100644 --- a/dag/dag.go +++ b/dag/dag.go @@ -332,12 +332,7 @@ func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) er } seen[current.Vertex] = struct{}{} - // Visit the current node - if err := f(current.Vertex, current.Depth); err != nil { - return err - } - - // Visit targets of this in a consistent order. + // Add next set of targets in a consistent order. 
targets := AsVertexList(g.UpEdges(current.Vertex)) sort.Sort(byVertexName(targets)) for _, t := range targets { @@ -346,6 +341,11 @@ func (g *AcyclicGraph) ReverseDepthFirstWalk(start []Vertex, f DepthWalkFunc) er Depth: current.Depth + 1, }) } + + // Visit the current node + if err := f(current.Vertex, current.Depth); err != nil { + return err + } } return nil diff --git a/dag/dag_test.go b/dag/dag_test.go index 93441775a..2d17a8c37 100644 --- a/dag/dag_test.go +++ b/dag/dag_test.go @@ -260,6 +260,33 @@ func TestAcyclicGraphWalk_error(t *testing.T) { t.Fatalf("bad: %#v", visits) } +func TestAcyclicGraph_ReverseDepthFirstWalk_WithRemoval(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Connect(BasicEdge(3, 2)) + g.Connect(BasicEdge(2, 1)) + + var visits []Vertex + var lock sync.Mutex + err := g.ReverseDepthFirstWalk([]Vertex{1}, func(v Vertex, d int) error { + lock.Lock() + defer lock.Unlock() + visits = append(visits, v) + g.Remove(v) + return nil + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []Vertex{1, 2, 3} + if !reflect.DeepEqual(visits, expected) { + t.Fatalf("expected: %#v, got: %#v", expected, visits) + } +} + const testGraphTransReductionStr = ` 1 2 diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index 142f735c8..ab35ca29d 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -4873,6 +4873,8 @@ func TestContext2Apply_destroyNestedModuleWithAttrsReferencingResource(t *testin expected := strings.TrimSpace(` module.middle: + +module.middle.bottom: `) if actual != expected { diff --git a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf index a9ce7fcc8..b5db44ee3 100644 --- a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf +++ b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/bottom/bottom.tf @@ -1 +1,5 @@ -variable "bottom_param" {} +variable bottom_param {} + +resource "null_resource" "bottom" { + value = "${var.bottom_param}" +} diff --git a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf index 0fde5830b..76652ee44 100644 --- a/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf +++ b/terraform/test-fixtures/apply-destroy-nested-module-with-attrs/middle/middle.tf @@ -1,8 +1,10 @@ -variable "param" {} - -resource "null_resource" "n" {} +variable param {} module "bottom" { source = "./bottom" bottom_param = "${var.param}" } + +resource "null_resource" "middle" { + value = "${var.param}" +} From 3f4857a07a24f3c9e2db6b4458fbf5be19a8b256 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 22:29:21 +0000 Subject: [PATCH 0298/1238] v0.7.0-rc3 --- terraform/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/version.go b/terraform/version.go index e781d9c25..b42880b83 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -10,7 +10,7 @@ const Version = "0.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "rc3" // SemVersion is an instance of version.Version. 
This has the secondary // benefit of verifying during tests and init time that our version is a From 7c40c174efc219a3785289ceef5601d277e3fb51 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 18:33:02 -0600 Subject: [PATCH 0299/1238] clean up after v0.7.0-rc3 --- terraform/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/version.go b/terraform/version.go index b42880b83..e781d9c25 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -10,7 +10,7 @@ const Version = "0.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "rc3" +const VersionPrerelease = "dev" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a From bb51834a4dfd09a11a9f4d71c8d92a23fcb15fbd Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 15 Jul 2016 18:38:47 -0600 Subject: [PATCH 0300/1238] scripts/dist.sh: tweaks for recent hc-releases --- scripts/dist.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/dist.sh b/scripts/dist.sh index 3cbeb83ee..6bf81eb7e 100755 --- a/scripts/dist.sh +++ b/scripts/dist.sh @@ -39,6 +39,6 @@ gpg --default-key 348FFC4C --detach-sig ./terraform_${VERSION}_SHA256SUMS popd # Upload -hc-releases -upload=./pkg/dist +hc-releases upload ./pkg/dist exit 0 From d9e3beb276c5ed5ea2ed0828aab8361a9d4a610f Mon Sep 17 00:00:00 2001 From: shanoor Date: Sat, 16 Jul 2016 17:36:15 +0400 Subject: [PATCH 0301/1238] azurerm_storage_account now returns storage keys value instead of their names (#7674) --- builtin/providers/azurerm/resource_arm_storage_account.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_storage_account.go b/builtin/providers/azurerm/resource_arm_storage_account.go index 4a4e73945..2d01952cf 100644 --- a/builtin/providers/azurerm/resource_arm_storage_account.go +++ b/builtin/providers/azurerm/resource_arm_storage_account.go @@ -223,8 +223,8 @@ func resourceArmStorageAccountRead(d *schema.ResourceData, meta interface{}) err } accessKeys := *keys.Keys - d.Set("primary_access_key", accessKeys[0].KeyName) - d.Set("secondary_access_key", accessKeys[1].KeyName) + d.Set("primary_access_key", accessKeys[0].Value) + d.Set("secondary_access_key", accessKeys[1].Value) d.Set("location", resp.Location) d.Set("account_type", resp.Sku.Name) d.Set("primary_location", resp.Properties.PrimaryLocation) From 28561bd064eda4b3ab27bc51d6778421b46a5ea4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 16 Jul 2016 14:37:41 +0100 Subject: [PATCH 0302/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5bb6fad5b..0f398dc4f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -207,6 +207,7 @@ BUG FIXES: * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion [GH-7584] * provider/azurerm: catch `azurerm_template_deployment` erroring silently [GH-7644] * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource [GH-7646] + * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names [GH-7674] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` 
within a project [GH-6743] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] From 03157610d82fcc96bb1f33351b59e160ae957259 Mon Sep 17 00:00:00 2001 From: David Harris Date: Sat, 16 Jul 2016 07:43:12 -0600 Subject: [PATCH 0303/1238] provider/aws: Read Elastic Beanstalk stack name (#7671) --- .../resource_aws_elastic_beanstalk_environment.go | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 5e5cb0c06..861722e17 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -108,13 +108,14 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { Set: optionSettingValueHash, }, "solution_stack_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - }, - "template_name": &schema.Schema{ Type: schema.TypeString, Optional: true, - ConflictsWith: []string{"solution_stack_name"}, + Computed: true, + ConflictsWith: []string{"template_name"}, + }, + "template_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, }, "wait_for_ready_timeout": &schema.Schema{ Type: schema.TypeString, @@ -420,6 +421,10 @@ func resourceAwsElasticBeanstalkEnvironmentRead(d *schema.ResourceData, meta int } } + if err := d.Set("solution_stack_name", env.SolutionStackName); err != nil { + return err + } + if err := d.Set("autoscaling_groups", flattenBeanstalkAsg(resources.EnvironmentResources.AutoScalingGroups)); err != nil { return err } From e85297f9585b0454a111bc8839c89a40d2f8b202 Mon Sep 17 00:00:00 2001 From: Andrew Sy Kim Date: Sat, 16 Jul 2016 10:13:29 -0400 Subject: [PATCH 0304/1238] provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs (#7470) * support for vpc linking ec2 instances in launch configs * add acceptance testing * generate vpc and security groups for each test run --- .../aws/resource_aws_launch_configuration.go | 27 ++++++++++++ .../resource_aws_launch_configuration_test.go | 43 +++++++++++++++++++ .../aws/r/launch_configuration.html.markdown | 2 + 3 files changed, 72 insertions(+) diff --git a/builtin/providers/aws/resource_aws_launch_configuration.go b/builtin/providers/aws/resource_aws_launch_configuration.go index 7726dda46..4070e369a 100644 --- a/builtin/providers/aws/resource_aws_launch_configuration.go +++ b/builtin/providers/aws/resource_aws_launch_configuration.go @@ -110,6 +110,20 @@ func resourceAwsLaunchConfiguration() *schema.Resource { Set: schema.HashString, }, + "vpc_classic_link_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "vpc_classic_link_security_groups": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "associate_public_ip_address": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -329,6 +343,16 @@ func resourceAwsLaunchConfigurationCreate(d *schema.ResourceData, meta interface ) } + if v, ok := d.GetOk("vpc_classic_link_id"); ok { + createLaunchConfigurationOpts.ClassicLinkVPCId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("vpc_classic_link_security_groups"); ok { + createLaunchConfigurationOpts.ClassicLinkVPCSecurityGroups = expandStringList( + v.(*schema.Set).List(), + ) + } + var blockDevices []*autoscaling.BlockDeviceMapping // We'll 
use this to detect if we're declaring it incorrectly as an ebs_block_device. @@ -519,6 +543,9 @@ func resourceAwsLaunchConfigurationRead(d *schema.ResourceData, meta interface{} d.Set("enable_monitoring", lc.InstanceMonitoring.Enabled) d.Set("security_groups", lc.SecurityGroups) + d.Set("vpc_classic_link_id", lc.ClassicLinkVPCId) + d.Set("vpc_classic_link_security_groups", lc.ClassicLinkVPCSecurityGroups) + if err := readLCBlockDevices(d, lc, ec2conn); err != nil { return err } diff --git a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go index 9fcab65a8..d63b9eea0 100644 --- a/builtin/providers/aws/resource_aws_launch_configuration_test.go +++ b/builtin/providers/aws/resource_aws_launch_configuration_test.go @@ -10,6 +10,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/autoscaling" + "github.com/aws/aws-sdk-go/service/ec2" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -89,6 +90,28 @@ func TestAccAWSLaunchConfiguration_withSpotPrice(t *testing.T) { }) } +func TestAccAWSLaunchConfiguration_withVpcClassicLink(t *testing.T) { + var vpc ec2.Vpc + var group ec2.SecurityGroup + var conf autoscaling.LaunchConfiguration + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSLaunchConfigurationDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLaunchConfigurationConfig_withVpcClassicLink, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.foo", &conf), + testAccCheckVpcExists("aws_vpc.foo", &vpc), + testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), + ), + }, + }, + }) +} + func TestAccAWSLaunchConfiguration_withIAMProfile(t *testing.T) { var conf autoscaling.LaunchConfiguration @@ -355,6 +378,26 @@ resource "aws_launch_configuration" "baz" { } } ` +const testAccAWSLaunchConfigurationConfig_withVpcClassicLink = ` +resource "aws_vpc" "foo" { + cidr_block = "10.0.0.0/16" + enable_classiclink = true +} + +resource "aws_security_group" "foo" { + name = "foo" + vpc_id = "${aws_vpc.foo.id}" +} + +resource "aws_launch_configuration" "foo" { + name = "TestAccAWSLaunchConfiguration_withVpcClassicLink" + image_id = "ami-2d39803a" + instance_type = "t1.micro" + + vpc_classic_link_id = "${aws_vpc.foo.id}" + vpc_classic_link_security_groups = ["${aws_security_group.foo.id}"] +} +` const testAccAWSLaunchConfigurationConfig_withIAMProfile = ` resource "aws_iam_role" "role" { diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index d6e46ac84..e35fc51f6 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -134,6 +134,8 @@ The following arguments are supported: * `key_name` - (Optional) The key name that should be used for the instance. * `security_groups` - (Optional) A list of associated security group IDS. * `associate_public_ip_address` - (Optional) Associate a public ip address with an instance in a VPC. +* `vpc_classic_link_id` - (Optional) The ID of a ClassicLink-enabled VPC. Only applies to EC2-Classic instances. (eg. 
`vpc-2730681a`) +* `vpc_classic_link_security_groups` - (Optional) The IDs of one or more security groups for the specified ClassicLink-enabled VPC (eg. `sg-46ae3d11`). * `user_data` - (Optional) The user data to provide when launching the instance. * `enable_monitoring` - (Optional) Enables/disables detailed monitoring. This is enabled by default. * `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized. From bcef828a2078545ebf4f7e17d4070aaeace8e90c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 16 Jul 2016 15:14:48 +0100 Subject: [PATCH 0305/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0f398dc4f..1c13f9df8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ IMPROVEMENTS: * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint [GH-7511] * provider/aws: Retry creation of IAM role depending on new IAM user [GH-7324] * provider/aws: Allow `port` on `aws_db_instance` to be updated [GH-7441] + * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs [GH-7470] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 6468b899b9a9cbbedbc7a496de374bf59038a479 Mon Sep 17 00:00:00 2001 From: stack72 Date: Sat, 16 Jul 2016 15:17:37 +0100 Subject: [PATCH 0306/1238] provider/aws: Fix the AMI ID in the test for VPC Classic Link --- builtin/providers/aws/resource_aws_launch_configuration_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_launch_configuration_test.go b/builtin/providers/aws/resource_aws_launch_configuration_test.go index d63b9eea0..d26027a25 100644 --- a/builtin/providers/aws/resource_aws_launch_configuration_test.go +++ b/builtin/providers/aws/resource_aws_launch_configuration_test.go @@ -391,7 +391,7 @@ resource "aws_security_group" "foo" { resource "aws_launch_configuration" "foo" { name = "TestAccAWSLaunchConfiguration_withVpcClassicLink" - image_id = "ami-2d39803a" + image_id = "ami-21f78e11" instance_type = "t1.micro" vpc_classic_link_id = "${aws_vpc.foo.id}" From b8d21330a732587f0a22d874a53bb47d136f7c26 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Sat, 16 Jul 2016 16:36:28 +0200 Subject: [PATCH 0307/1238] provider/aws: Allow importing lambda_function (#7610) --- .../aws/import_aws_lambda_function_test.go | 81 +++++++++++++++++++ .../aws/resource_aws_lambda_function.go | 7 ++ 2 files changed, 88 insertions(+) create mode 100644 builtin/providers/aws/import_aws_lambda_function_test.go diff --git a/builtin/providers/aws/import_aws_lambda_function_test.go b/builtin/providers/aws/import_aws_lambda_function_test.go new file mode 100644 index 000000000..3672c2e10 --- /dev/null +++ b/builtin/providers/aws/import_aws_lambda_function_test.go @@ -0,0 +1,81 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSLambdaFunction_importLocalFile(t *testing.T) { + resourceName := "aws_lambda_function.lambda_function_test" + + rName := fmt.Sprintf("tf_test_%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckLambdaFunctionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLambdaConfigBasic(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"filename"}, + }, + }, + }) +} + +func TestAccAWSLambdaFunction_importLocalFile_VPC(t *testing.T) { + resourceName := "aws_lambda_function.lambda_function_test" + + rName := fmt.Sprintf("tf_test_%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaFunctionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLambdaConfigWithVPC(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"filename"}, + }, + }, + }) +} + +func TestAccAWSLambdaFunction_importS3(t *testing.T) { + resourceName := "aws_lambda_function.lambda_function_s3test" + + rName := fmt.Sprintf("tf_test_%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLambdaFunctionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSLambdaConfigS3(rName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"s3_bucket", "s3_key"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go index 4df10e948..dee086c6a 100644 --- a/builtin/providers/aws/resource_aws_lambda_function.go +++ b/builtin/providers/aws/resource_aws_lambda_function.go @@ -24,6 +24,13 @@ func resourceAwsLambdaFunction() *schema.Resource { Update: resourceAwsLambdaFunctionUpdate, Delete: resourceAwsLambdaFunctionDelete, + Importer: &schema.ResourceImporter{ + State: func(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + d.Set("function_name", d.Id()) + return []*schema.ResourceData{d}, nil + }, + }, + Schema: map[string]*schema.Schema{ "filename": &schema.Schema{ Type: schema.TypeString, From 6dd0e3f6dba21148abfc0d51928f5e7897aee829 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Sun, 17 Jul 2016 03:24:48 +0900 Subject: [PATCH 0308/1238] Add validation for icmp_type and icmp_code. Also, change order of processing to parse icmp_type first. Change wording of the debug messages, and change format string type for rule_number where appropriate. 
Signed-off-by: Krzysztof Wilczynski --- .../aws/resource_aws_network_acl_rule.go | 49 ++++++++++++------- .../aws/resource_aws_network_acl_rule_test.go | 14 +++++- 2 files changed, 42 insertions(+), 21 deletions(-) diff --git a/builtin/providers/aws/resource_aws_network_acl_rule.go b/builtin/providers/aws/resource_aws_network_acl_rule.go index be347daf3..ad3f302a3 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule.go @@ -63,14 +63,16 @@ func resourceAwsNetworkAclRule() *schema.Resource { ForceNew: true, }, "icmp_type": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateICMPArgumentValue, }, "icmp_code": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateICMPArgumentValue, }, }, } @@ -85,7 +87,7 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e var ok bool p, ok = protocolIntegers()[protocol] if !ok { - return fmt.Errorf("Invalid Protocol %s for rule %#v", protocol, d.Get("rule_number").(int)) + return fmt.Errorf("Invalid Protocol %s for rule %d", protocol, d.Get("rule_number").(int)) } } log.Printf("[INFO] Transformed Protocol %s into %d", protocol, p) @@ -107,21 +109,21 @@ func resourceAwsNetworkAclRuleCreate(d *schema.ResourceData, meta interface{}) e // of ICMP codes and types, see: http://www.nthelp.com/icmp.html if p == 1 { params.IcmpTypeCode = &ec2.IcmpTypeCode{} - if v, ok := d.GetOk("icmp_code"); ok { - icmpCode, err := strconv.Atoi(v.(string)) - if err != nil { - return fmt.Errorf("Unable to parse ICMP code %s for rule %#v", v, d.Get("rule_number").(int)) - } - params.IcmpTypeCode.Code = aws.Int64(int64(icmpCode)) - log.Printf("[DEBUG] Transformed ICMP code %s into %d", v, icmpCode) - } if v, ok := d.GetOk("icmp_type"); ok { icmpType, err := strconv.Atoi(v.(string)) if err != nil { - return fmt.Errorf("Unable to parse ICMP type %s for rule %#v", v, d.Get("rule_number").(int)) + return fmt.Errorf("Unable to parse ICMP type %s for rule %d", v, d.Get("rule_number").(int)) } params.IcmpTypeCode.Type = aws.Int64(int64(icmpType)) - log.Printf("[DEBUG] Transformed ICMP type %s into %d", v, icmpType) + log.Printf("[DEBUG] Got ICMP type %d for rule %d", icmpType, d.Get("rule_number").(int)) + } + if v, ok := d.GetOk("icmp_code"); ok { + icmpCode, err := strconv.Atoi(v.(string)) + if err != nil { + return fmt.Errorf("Unable to parse ICMP code %s for rule %d", v, d.Get("rule_number").(int)) + } + params.IcmpTypeCode.Code = aws.Int64(int64(icmpCode)) + log.Printf("[DEBUG] Got ICMP code %d for rule %d", icmpCode, d.Get("rule_number").(int)) } } @@ -176,7 +178,7 @@ func resourceAwsNetworkAclRuleRead(d *schema.ResourceData, meta interface{}) err var ok bool protocol, ok := protocolStrings(protocolIntegers())[p] if !ok { - return fmt.Errorf("Invalid Protocol %s for rule %#v", *resp.Protocol, d.Get("rule_number").(int)) + return fmt.Errorf("Invalid Protocol %s for rule %d", *resp.Protocol, d.Get("rule_number").(int)) } log.Printf("[INFO] Transformed Protocol %s back into %s", *resp.Protocol, protocol) d.Set("protocol", protocol) @@ -209,7 +211,7 @@ func findNetworkAclRule(d *schema.ResourceData, meta interface{}) (*ec2.NetworkA filters := make([]*ec2.Filter, 0, 2) ruleNumberFilter := &ec2.Filter{ Name: aws.String("entry.rule-number"), - Values: 
[]*string{aws.String(fmt.Sprintf("%v", d.Get("rule_number").(int)))}, + Values: []*string{aws.String(fmt.Sprintf("%d", d.Get("rule_number").(int)))}, } filters = append(filters, ruleNumberFilter) egressFilter := &ec2.Filter{ @@ -256,3 +258,12 @@ func networkAclIdRuleNumberEgressHash(networkAclId string, ruleNumber int, egres buf.WriteString(fmt.Sprintf("%s-", protocol)) return fmt.Sprintf("nacl-%d", hashcode.String(buf.String())) } + +func validateICMPArgumentValue(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := strconv.Atoi(value) + if len(value) == 0 || err != nil { + errors = append(errors, fmt.Errorf("%q must be an integer value: %q", k, value)) + } + return +} diff --git a/builtin/providers/aws/resource_aws_network_acl_rule_test.go b/builtin/providers/aws/resource_aws_network_acl_rule_test.go index 95682bf41..b7559d7c9 100644 --- a/builtin/providers/aws/resource_aws_network_acl_rule_test.go +++ b/builtin/providers/aws/resource_aws_network_acl_rule_test.go @@ -24,7 +24,8 @@ func TestAccAWSNetworkAclRule_basic(t *testing.T) { Config: testAccAWSNetworkAclRuleBasicConfig, Check: resource.ComposeTestCheckFunc( testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.baz", &networkAcl), - testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.quux", &networkAcl), + testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.qux", &networkAcl), + testAccCheckAWSNetworkAclRuleExists("aws_network_acl_rule.wibble", &networkAcl), ), }, }, @@ -123,7 +124,7 @@ resource "aws_network_acl_rule" "baz" { from_port = 22 to_port = 22 } -resource "aws_network_acl_rule" "quux" { +resource "aws_network_acl_rule" "qux" { network_acl_id = "${aws_network_acl.bar.id}" rule_number = 300 protocol = "icmp" @@ -132,4 +133,13 @@ resource "aws_network_acl_rule" "quux" { icmp_type = 0 icmp_code = -1 } +resource "aws_network_acl_rule" "wibble" { + network_acl_id = "${aws_network_acl.bar.id}" + rule_number = 400 + protocol = "icmp" + rule_action = "allow" + cidr_block = "0.0.0.0/0" + icmp_type = -1 + icmp_code = -1 +} ` From 96b6a3dcb88aa6537376645f46f7b001338a2b3e Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Sun, 17 Jul 2016 03:34:32 +0900 Subject: [PATCH 0309/1238] Add note about setting wildcard icmp_type. Signed-off-by: Krzysztof Wilczynski --- .../source/docs/providers/aws/r/network_acl_rule.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown index c5e7a327b..2df0198a6 100644 --- a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown +++ b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown @@ -45,6 +45,8 @@ The following arguments are supported: ~> **NOTE:** If the value of `protocol` is `-1` or `all`, the `from_port` and `to_port` values will be ignored and the rule will apply to all ports. +~> **NOTE:** If the value of `icmp_type` is `-1` (which results in a wildcard ICMP type), the `icmp_code` must also be set to `-1` (wildcard ICMP code). 
+ ~> Note: For more information on ICMP types and codes, see here: http://www.nthelp.com/icmp.html ## Attributes Reference @@ -52,4 +54,3 @@ The following arguments are supported: The following attributes are exported: * `id` - The ID of the network ACL Rule - From 9dbf998d2359e3edbf46ba609bd5cbbb4f0f9f56 Mon Sep 17 00:00:00 2001 From: Matthew Clarke Date: Mon, 18 Jul 2016 09:37:39 +0100 Subject: [PATCH 0310/1238] Update aws api gateway docs to add some examples (#7586) * added aws api gateway docs request parameter examples; fixed deployment example * update aws api gw deployment docs to add depends on --- .../aws/r/api_gateway_deployment.html.markdown | 16 ++++++++++++---- .../aws/r/api_gateway_integration.html.markdown | 4 +++- ...pi_gateway_integration_response.html.markdown | 2 ++ .../aws/r/api_gateway_method.html.markdown | 2 ++ .../r/api_gateway_method_response.html.markdown | 2 ++ 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown b/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown index 5be552d83..9068a2d19 100644 --- a/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_deployment.html.markdown @@ -21,23 +21,31 @@ resource "aws_api_gateway_rest_api" "MyDemoAPI" { description = "This is my API for demonstration purposes" } -resource "aws_api_gateway_resource" "test" { +resource "aws_api_gateway_resource" "MyDemoResource" { rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}" parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}" path_part = "test" } -resource "aws_api_gateway_method" "test" { +resource "aws_api_gateway_method" "MyDemoMethod" { rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}" - resource_id = "${aws_api_gateway_resource.test.id}" + resource_id = "${aws_api_gateway_resource.MyDemoResource.id}" http_method = "GET" authorization = "NONE" } +resource "aws_api_gateway_integration" "MyDemoIntegration" { + rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}" + resource_id = "${aws_api_gateway_resource.MyDemoResource.id}" + http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}" + type = "MOCK" +} + resource "aws_api_gateway_deployment" "MyDemoDeployment" { - depends_on = ["aws_api_gateway_integration.test"] + depends_on = ["aws_api_gateway_method.MyDemoMethod"] rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}" + stage_name = "test" variables = { "answer" = "42" diff --git a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown index fc36efcc0..b6704bb74 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown @@ -58,4 +58,6 @@ The following arguments are supported: * `request_templates` - (Optional) A map of the integration's request templates. * `request_parameters_in_json` - (Optional) A map written as a JSON string specifying the request query string parameters and headers that should be passed to the - backend responder + backend responder. + For example: `request_parameters_in_json = "{\"integration.request.header.X-Some-Other-Header\":\"method.request.header.X-Some-Header\"}"` + would add the header `X-Some-Header` from method to the integration as the header `X-Some-Other-Header`. 
diff --git a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown index bf65d2771..1db32a92a 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown @@ -67,3 +67,5 @@ The following arguments are supported: For all other `HTTP` and `AWS` backends, the HTTP status code is matched. * `response_templates` - (Optional) A map specifying the templates used to transform the integration response body * `response_parameters_in_json` - (Optional) A map written as JSON string specifying response parameters that can be read from the backend response + For example: `response_parameters_in_json = "{\"method.response.header.X-Some-Header\":\"integration.response.header.X-Some-Other-Header\"}"`, + would add the header `X-Some-Other-Header` from the integration response to the method response as the header `X-Some-Header`. diff --git a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown index 54e15183b..87221b65a 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown @@ -46,3 +46,5 @@ The following arguments are supported: and value is either `Error`, `Empty` (built-in models) or `aws_api_gateway_model`'s `name`. * `request_parameters_in_json` - (Optional) A map written as a JSON string specifying the request query string parameters and headers that should be passed to the integration + For example: `request_parameters_in_json = "{\"method.request.header.X-Some-Header\":true}"` + would define that the header X-Some-Header must be provided on the request. diff --git a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown index 2704c07c4..d1bc4f9c0 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown @@ -56,3 +56,5 @@ The following arguments are supported: * `status_code` - (Required) The HTTP status code * `response_models` - (Optional) A map of the API models used for the response's content type * `response_parameters_in_json` - (Optional) A map written as a JSON string representing response parameters that can be sent to the caller + For example: `response_parameters_in_json = "{\"method.response.header.X-Some-Header\":true}"` + would define that the header X-Some-Header can be provided on the response. 
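Taken together, the examples documented above trace a single header through one method: the method declares that `X-Some-Header` must be provided, the integration forwards it to the backend as `X-Some-Other-Header`, and the two response mappings carry it back to the caller. A minimal sketch combining the four resources is shown below; it reuses the `MyDemoAPI`/`MyDemoResource`/`MyDemoMethod`/`MyDemoIntegration` names from the deployment example, while the `MyDemoMethodResponse`/`MyDemoIntegrationResponse` names and the `200` status code are illustrative assumptions rather than values from these docs.

```
resource "aws_api_gateway_method" "MyDemoMethod" {
  rest_api_id   = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
  resource_id   = "${aws_api_gateway_resource.MyDemoResource.id}"
  http_method   = "GET"
  authorization = "NONE"

  # The caller must provide X-Some-Header on the request.
  request_parameters_in_json = "{\"method.request.header.X-Some-Header\":true}"
}

resource "aws_api_gateway_integration" "MyDemoIntegration" {
  rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
  resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
  http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
  type        = "MOCK"

  # Pass the method's X-Some-Header to the backend as X-Some-Other-Header.
  request_parameters_in_json = "{\"integration.request.header.X-Some-Other-Header\":\"method.request.header.X-Some-Header\"}"
}

resource "aws_api_gateway_method_response" "MyDemoMethodResponse" {
  rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
  resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
  http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
  status_code = "200"

  # Declare that X-Some-Header may be returned to the caller.
  response_parameters_in_json = "{\"method.response.header.X-Some-Header\":true}"
}

resource "aws_api_gateway_integration_response" "MyDemoIntegrationResponse" {
  rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
  resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
  http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
  status_code = "${aws_api_gateway_method_response.MyDemoMethodResponse.status_code}"

  # Map the integration's X-Some-Other-Header back onto the method's X-Some-Header.
  response_parameters_in_json = "{\"method.response.header.X-Some-Header\":\"integration.response.header.X-Some-Other-Header\"}"
}
```

As documented above, the method response declares which headers may be sent to the caller, and the integration response supplies their values from the backend response.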
From 697828f81fbef58fe16a0c3b6153e7dea47b313c Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Mon, 18 Jul 2016 11:12:15 +0200 Subject: [PATCH 0311/1238] Handle Scaleway tags correctly (#7682) --- .../scaleway/resource_scaleway_server.go | 17 +++++++++++++++-- .../scaleway/resource_scaleway_server_test.go | 3 +++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/builtin/providers/scaleway/resource_scaleway_server.go b/builtin/providers/scaleway/resource_scaleway_server.go index 0dddfa8e7..659c5c29f 100644 --- a/builtin/providers/scaleway/resource_scaleway_server.go +++ b/builtin/providers/scaleway/resource_scaleway_server.go @@ -82,8 +82,10 @@ func resourceScalewayServerCreate(d *schema.ResourceData, m interface{}) error { server.Bootscript = String(bootscript.(string)) } - if tags, ok := d.GetOk("tags"); ok { - server.Tags = tags.([]string) + if raw, ok := d.GetOk("tags"); ok { + for _, tag := range raw.([]interface{}) { + server.Tags = append(server.Tags, tag.(string)) + } } id, err := scaleway.PostServer(server) @@ -129,6 +131,7 @@ func resourceScalewayServerRead(d *schema.ResourceData, m interface{}) error { d.Set("ipv4_address_public", server.PublicAddress.IP) d.Set("state", server.State) d.Set("state_detail", server.StateDetail) + d.Set("tags", server.Tags) d.SetConnInfo(map[string]string{ "type": "ssh", @@ -148,6 +151,16 @@ func resourceScalewayServerUpdate(d *schema.ResourceData, m interface{}) error { req.Name = &name } + if d.HasChange("tags") { + if raw, ok := d.GetOk("tags"); ok { + var tags []string + for _, tag := range raw.([]interface{}) { + tags = append(tags, tag.(string)) + } + req.Tags = &tags + } + } + if d.HasChange("dynamic_ip_required") { req.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) } diff --git a/builtin/providers/scaleway/resource_scaleway_server_test.go b/builtin/providers/scaleway/resource_scaleway_server_test.go index b8fa3ff48..27fdab537 100644 --- a/builtin/providers/scaleway/resource_scaleway_server_test.go +++ b/builtin/providers/scaleway/resource_scaleway_server_test.go @@ -23,6 +23,8 @@ func TestAccScalewayServer_Basic(t *testing.T) { "scaleway_server.base", "type", "C1"), resource.TestCheckResourceAttr( "scaleway_server.base", "name", "test"), + resource.TestCheckResourceAttr( + "scaleway_server.base", "tags.0", "terraform-test"), ), }, }, @@ -110,4 +112,5 @@ resource "scaleway_server" "base" { # ubuntu 14.04 image = "%s" type = "C1" + tags = [ "terraform-test" ] }`, armImageIdentifier) From 9c095d66e1a3b5a2d4a6914536f6a460374ca3d8 Mon Sep 17 00:00:00 2001 From: Steven Eschinger Date: Mon, 18 Jul 2016 05:20:18 -0400 Subject: [PATCH 0312/1238] azurerm: fix vault_certificates in vm/scale_set (#7681) When setting the certificate_url and certificate_store values in os_profile_secrets / vault_certificates for a Windows VM in AzureRM, I was getting the following error: ``` [DEBUG] Error setting Virtual Machine Storage OS Profile Secrets: &errors.errorString{s:"Invalid address to set: []string{\"os_profile_secrets\", \"0\", \"vault_certificates\"}"} ``` This seems to resolve the issue. 
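The configuration shape involved is a `vault_certificates` list nested inside `os_profile_secrets` on the virtual machine resource. A minimal sketch of that block follows; the source vault ID, certificate URL, and certificate store are placeholder values for illustration, not values taken from this change.

```
os_profile_secrets {
  # ID of the key vault that holds the certificate (placeholder value).
  source_vault_id = "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.KeyVault/vaults/example-vault"

  vault_certificates {
    # URL of the certificate secret in the vault (placeholder value).
    certificate_url = "https://example-vault.vault.azure.net/secrets/example-cert/0123456789abcdef"

    # Windows certificate store to install the certificate into (placeholder value).
    certificate_store = "My"
  }
}
```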
--- builtin/providers/azurerm/resource_arm_virtual_machine.go | 4 ++-- .../azurerm/resource_arm_virtual_machine_scale_set.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index 64d9ac83a..e2b26584d 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -339,7 +339,7 @@ func resourceArmVirtualMachine() *schema.Resource { }, "vault_certificates": { - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -908,7 +908,7 @@ func expandAzureRmVirtualMachineOsProfileSecrets(d *schema.ResourceData) *[]comp } if v := config["vault_certificates"]; v != nil { - certsConfig := v.(*schema.Set).List() + certsConfig := v.([]interface{}) certs := make([]compute.VaultCertificate, 0, len(certsConfig)) for _, certConfig := range certsConfig { config := certConfig.(map[string]interface{}) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go index abf87fb66..d8de1226a 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_scale_set.go @@ -111,7 +111,7 @@ func resourceArmVirtualMachineScaleSet() *schema.Resource { }, "vault_certificates": &schema.Schema{ - Type: schema.TypeSet, + Type: schema.TypeList, Optional: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -1063,7 +1063,7 @@ func expandAzureRmVirtualMachineScaleSetOsProfileSecrets(d *schema.ResourceData) } if v := config["vault_certificates"]; v != nil { - certsConfig := v.(*schema.Set).List() + certsConfig := v.([]interface{}) certs := make([]compute.VaultCertificate, 0, len(certsConfig)) for _, certConfig := range certsConfig { config := certConfig.(map[string]interface{}) From 0abf0b26661df5259e9f220aeb16f4fa6063af2a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 18 Jul 2016 10:44:27 +0100 Subject: [PATCH 0313/1238] website: Scaleway Documentation (#7685) Scaleway wasn't added to the `website/source/assets/stylesheets/_docs.scss` file --- website/source/assets/stylesheets/_docs.scss | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index f4742e4ed..773b2494f 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -39,6 +39,7 @@ body.layout-postgresql, body.layout-powerdns, body.layout-random, body.layout-rundeck, +body.layout-scaleway, body.layout-statuscake, body.layout-softlayer, body.layout-template, From 05aef0e66087616f4f20b358f753cd6885130ef9 Mon Sep 17 00:00:00 2001 From: David Harris Date: Mon, 18 Jul 2016 04:37:37 -0600 Subject: [PATCH 0314/1238] provider/aws: Support ec2-classic and vpc in beanstalk recurring plans. (#6491) * provider/aws: Support ec2-classic and vpc Fix Elastic Beanstalk recurring plans when additional security groups are supplied. In the previous version, only non-default vpc security groups would be handled by dropGeneratedSecurityGroup. 
* provider/aws: Elastic Beanstalk VPC Test --- ...ource_aws_elastic_beanstalk_environment.go | 32 ++++++- ..._aws_elastic_beanstalk_environment_test.go | 83 +++++++++++++++++++ 2 files changed, 111 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index 861722e17..71ee9b343 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -685,20 +685,44 @@ func dropGeneratedSecurityGroup(settingValue string, meta interface{}) string { groups := strings.Split(settingValue, ",") - resp, err := conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ - GroupIds: aws.StringSlice(groups), - }) + // Check to see if groups are ec2-classic or vpc security groups + ec2Classic := true + beanstalkSGRegexp := "sg-[0-9a-fA-F]{8}" + for _, g := range groups { + if ok, _ := regexp.MatchString(beanstalkSGRegexp, g); ok { + ec2Classic = false + break + } + } + + var resp *ec2.DescribeSecurityGroupsOutput + var err error + + if ec2Classic { + resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ + GroupNames: aws.StringSlice(groups), + }) + } else { + resp, err = conn.DescribeSecurityGroups(&ec2.DescribeSecurityGroupsInput{ + GroupIds: aws.StringSlice(groups), + }) + } if err != nil { log.Printf("[DEBUG] Elastic Beanstalk error describing SecurityGroups: %v", err) return settingValue } + log.Printf("[DEBUG] Elastic Beanstalk using ec2-classic security-groups: %t", ec2Classic) var legitGroups []string for _, group := range resp.SecurityGroups { log.Printf("[DEBUG] Elastic Beanstalk SecurityGroup: %v", *group.GroupName) if !strings.HasPrefix(*group.GroupName, "awseb") { - legitGroups = append(legitGroups, *group.GroupId) + if ec2Classic { + legitGroups = append(legitGroups, *group.GroupName) + } else { + legitGroups = append(legitGroups, *group.GroupId) + } } } diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go index eeb4657c4..4d9481791 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go @@ -158,6 +158,26 @@ func TestAccAWSBeanstalkEnv_resource(t *testing.T) { }) } +func TestAccAWSBeanstalkEnv_vpc(t *testing.T) { + var app elasticbeanstalk.EnvironmentDescription + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBeanstalkEnvDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBeanstalkEnv_VPC(acctest.RandString(5)), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.default", &app), + ), + }, + }, + }) +} + func testAccCheckBeanstalkEnvDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn @@ -488,3 +508,66 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" { } } ` + +func testAccBeanstalkEnv_VPC(name string) string { + return fmt.Sprintf(` +resource "aws_vpc" "tf_b_test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_internet_gateway" "tf_b_test" { + vpc_id = "${aws_vpc.tf_b_test.id}" +} + +resource "aws_route" "r" { + route_table_id = "${aws_vpc.tf_b_test.main_route_table_id}" + destination_cidr_block = 
"0.0.0.0/0" + gateway_id = "${aws_internet_gateway.tf_b_test.id}" +} + +resource "aws_subnet" "main" { + vpc_id = "${aws_vpc.tf_b_test.id}" + cidr_block = "10.0.0.0/24" +} + +resource "aws_security_group" "default" { + name = "tf-b-test-%s" + vpc_id = "${aws_vpc.tf_b_test.id}" +} + +resource "aws_elastic_beanstalk_application" "default" { + name = "tf-test-name" + description = "tf-test-desc" +} + +resource "aws_elastic_beanstalk_environment" "default" { + name = "tf-test-name" + application = "${aws_elastic_beanstalk_application.default.name}" + solution_stack_name = "64bit Amazon Linux running Python" + + setting { + namespace = "aws:ec2:vpc" + name = "VPCId" + value = "${aws_vpc.tf_b_test.id}" + } + + setting { + namespace = "aws:ec2:vpc" + name = "Subnets" + value = "${aws_subnet.main.id}" + } + + setting { + namespace = "aws:ec2:vpc" + name = "AssociatePublicIpAddress" + value = "true" + } + + setting { + namespace = "aws:autoscaling:launchconfiguration" + name = "SecurityGroups" + value = "${aws_security_group.default.id}" + } +} +`, name) +} From 14d82af73aa96f4157dd7b4ad71e30318de9be77 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 18 Jul 2016 11:39:53 +0100 Subject: [PATCH 0315/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1c13f9df8..f83b1b442 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -195,6 +195,7 @@ BUG FIXES: * provider/aws: Refresh CloudWatch Group from state on 404 [GH-7576] * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` [GH-7312] * provider/aws: Safely get ELB values [GH-7585] + * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk [GH-6491] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 08291afd977f84bb3ed487d26ab3d2bba343ca78 Mon Sep 17 00:00:00 2001 From: David Kelly Date: Mon, 18 Jul 2016 13:14:35 +0100 Subject: [PATCH 0316/1238] Increased lambda event mapping creation timeout (#7657) Increased the retry timeout from 1 to 5 minutes due to the time for IAM permission propagation --- .../providers/aws/resource_aws_lambda_event_source_mapping.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go index 72804ac9d..03072307f 100644 --- a/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go +++ b/builtin/providers/aws/resource_aws_lambda_event_source_mapping.go @@ -98,7 +98,7 @@ func resourceAwsLambdaEventSourceMappingCreate(d *schema.ResourceData, meta inte // // The role may exist, but the permissions may not have propagated, so we // retry - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, func() *resource.RetryError { eventSourceMappingConfiguration, err := conn.CreateEventSourceMapping(params) if err != nil { if awserr, ok := err.(awserr.Error); ok { @@ -184,7 +184,7 @@ func resourceAwsLambdaEventSourceMappingUpdate(d *schema.ResourceData, meta inte Enabled: aws.Bool(d.Get("enabled").(bool)), } - err := resource.Retry(1*time.Minute, func() *resource.RetryError { + err := resource.Retry(5*time.Minute, 
func() *resource.RetryError { _, err := conn.UpdateEventSourceMapping(params) if err != nil { if awserr, ok := err.(awserr.Error); ok { From 8b11bc058189f2b8c419974496e7dac3463f2b4b Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 18 Jul 2016 14:16:15 +0200 Subject: [PATCH 0317/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f83b1b442..82b26badc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -170,6 +170,7 @@ BUG FIXES: * provider/aws: Fix issue reattaching a VPN gateway to a VPC [GH-6987] * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations [GH-6512] * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) [GH-6761] + * provider/aws: Increased lambda event mapping creation timeout [GH-7657] * provider/aws: Handle spurious failures in resourceAwsSecurityGroupRuleRead [GH-7377] * provider/aws: Make 'stage_name' required in api_gateway_deployment [GH-6797] * provider/aws: Mark Lambda function as gone when it's gone [GH-6924] From aabb200f2d5f676f4ac41d8bc48ea014e501f528 Mon Sep 17 00:00:00 2001 From: macheins Date: Thu, 7 Jul 2016 16:01:33 +0200 Subject: [PATCH 0318/1238] Allow empty names in aws_route53_record Added test for aws_route53_record with empty name Integrated test for aws_route53_record with empty name Changed test to use a third-level domain for zone --- .../aws/resource_aws_route53_record.go | 6 +++- .../aws/resource_aws_route53_record_test.go | 31 +++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_route53_record.go b/builtin/providers/aws/resource_aws_route53_record.go index 16002b4b6..6a646335f 100644 --- a/builtin/providers/aws/resource_aws_route53_record.go +++ b/builtin/providers/aws/resource_aws_route53_record.go @@ -695,7 +695,11 @@ func expandRecordName(name, zone string) string { rn := strings.ToLower(strings.TrimSuffix(name, ".")) zone = strings.TrimSuffix(zone, ".") if !strings.HasSuffix(rn, zone) { - rn = strings.Join([]string{name, zone}, ".") + if len(name) == 0 { + rn = zone + } else { + rn = strings.Join([]string{name, zone}, ".") + } } return rn } diff --git a/builtin/providers/aws/resource_aws_route53_record_test.go b/builtin/providers/aws/resource_aws_route53_record_test.go index 531211842..823d1b67e 100644 --- a/builtin/providers/aws/resource_aws_route53_record_test.go +++ b/builtin/providers/aws/resource_aws_route53_record_test.go @@ -339,6 +339,23 @@ func TestAccAWSRoute53Record_TypeChange(t *testing.T) { }) } +func TestAccAWSRoute53Record_empty(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_route53_record.empty", + Providers: testAccProviders, + CheckDestroy: testAccCheckRoute53RecordDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccRoute53RecordConfigEmptyName, + Check: resource.ComposeTestCheckFunc( + testAccCheckRoute53RecordExists("aws_route53_record.empty"), + ), + }, + }, + }) +} + func testAccCheckRoute53RecordDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).r53conn for _, rs := range s.RootModule().Resources { @@ -972,3 +989,17 @@ resource "aws_route53_record" "sample" { records = ["127.0.0.1", "8.8.8.8"] } ` + +const testAccRoute53RecordConfigEmptyName = ` +resource "aws_route53_zone" "main" { + name = "not.example.com" +} + 
+resource "aws_route53_record" "empty" { + zone_id = "${aws_route53_zone.main.zone_id}" + name = "" + type = "A" + ttl = "30" + records = ["127.0.0.1"] +} +` From 8a72bfa5a9626fe9a8643ccd3c10b1f66e5be265 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 18 Jul 2016 13:24:14 +0100 Subject: [PATCH 0319/1238] provider/aws: Fix the Destroy func in the Route53 record tests The test didn't expand the record name - therefore, when the name was empty, it wasn't setting it to the domain name (like the normal resource does!) This was causing an error --- builtin/providers/aws/resource_aws_route53_record_test.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_route53_record_test.go b/builtin/providers/aws/resource_aws_route53_record_test.go index 823d1b67e..5ec466b7c 100644 --- a/builtin/providers/aws/resource_aws_route53_record_test.go +++ b/builtin/providers/aws/resource_aws_route53_record_test.go @@ -368,9 +368,11 @@ func testAccCheckRoute53RecordDestroy(s *terraform.State) error { name := parts[1] rType := parts[2] + en := expandRecordName(name, "notexample.com") + lopts := &route53.ListResourceRecordSetsInput{ HostedZoneId: aws.String(cleanZoneID(zone)), - StartRecordName: aws.String(name), + StartRecordName: aws.String(en), StartRecordType: aws.String(rType), } @@ -427,6 +429,7 @@ func testAccCheckRoute53RecordExists(n string) resource.TestCheckFunc { if len(resp.ResourceRecordSets) == 0 { return fmt.Errorf("Record does not exist") } + // rec := resp.ResourceRecordSets[0] for _, rec := range resp.ResourceRecordSets { recName := cleanRecordName(*rec.Name) @@ -992,7 +995,7 @@ resource "aws_route53_record" "sample" { const testAccRoute53RecordConfigEmptyName = ` resource "aws_route53_zone" "main" { - name = "not.example.com" + name = "notexample.com" } resource "aws_route53_record" "empty" { From 37b6ab45070c9853f0c800bb0207ff99aa7b50e5 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 18 Jul 2016 15:39:51 +0200 Subject: [PATCH 0320/1238] provider/aws: Bump rds_cluster timeout to 15 mins (#7604) --- builtin/providers/aws/resource_aws_rds_cluster.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index fa030b044..481ef11dd 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -251,7 +251,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error Pending: []string{"creating", "backing-up", "modifying"}, Target: []string{"available"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 5 * time.Minute, + Timeout: 15 * time.Minute, MinTimeout: 3 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting } @@ -345,7 +345,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error Pending: []string{"creating", "backing-up", "modifying"}, Target: []string{"available"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 5 * time.Minute, + Timeout: 15 * time.Minute, MinTimeout: 3 * time.Second, } @@ -509,7 +509,7 @@ func resourceAwsRDSClusterDelete(d *schema.ResourceData, meta interface{}) error Pending: []string{"available", "deleting", "backing-up", "modifying"}, Target: []string{"destroyed"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 5 * time.Minute, + Timeout: 15 * time.Minute, MinTimeout: 3 * time.Second, } From 
d4e8616a9caf000c420a58b3c928a295230ffbe7 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 18 Jul 2016 14:40:22 +0100 Subject: [PATCH 0321/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 82b26badc..a21c4b693 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -197,6 +197,7 @@ BUG FIXES: * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` [GH-7312] * provider/aws: Safely get ELB values [GH-7585] * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk [GH-6491] + * provider/aws: Bump rds_cluster timeout to 15 mins [GH-7604] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 5d18f41f04ab717fbeb9b4506adceb2b55998ae3 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 18 Jul 2016 12:52:10 -0500 Subject: [PATCH 0322/1238] core: Convert context vars to map[string]interface{} This is the first step in allowing overrides of map and list variables. We convert Context.variables to map[string]interface{} from map[string]string and fix up all the call sites. --- command/meta.go | 2 +- command/push.go | 12 ++++++------ command/push_test.go | 18 +++++++++--------- terraform/context.go | 10 +++++----- terraform/context_apply_test.go | 22 +++++++++++----------- terraform/context_input_test.go | 14 +++++++------- terraform/context_plan_test.go | 12 ++++++------ terraform/context_validate_test.go | 4 ++-- terraform/plan.go | 11 ++++++++--- terraform/plan_test.go | 2 +- terraform/semantics.go | 2 +- terraform/semantics_test.go | 6 +++--- 12 files changed, 60 insertions(+), 55 deletions(-) diff --git a/command/meta.go b/command/meta.go index ddc9606f1..3b0896352 100644 --- a/command/meta.go +++ b/command/meta.go @@ -294,7 +294,7 @@ func (m *Meta) contextOpts() *terraform.ContextOpts { copy(opts.Hooks[1:], m.ContextOpts.Hooks) copy(opts.Hooks[len(m.ContextOpts.Hooks)+1:], m.extraHooks) - vs := make(map[string]string) + vs := make(map[string]interface{}) for k, v := range opts.Variables { vs[k] = v } diff --git a/command/push.go b/command/push.go index 64b97516f..011b0c34f 100644 --- a/command/push.go +++ b/command/push.go @@ -276,21 +276,21 @@ func (c *PushCommand) Synopsis() string { // pushClient is implementd internally to control where pushes go. This is // either to Atlas or a mock for testing. 
type pushClient interface { - Get(string) (map[string]string, error) + Get(string) (map[string]interface{}, error) Upsert(*pushUpsertOptions) (int, error) } type pushUpsertOptions struct { Name string Archive *archive.Archive - Variables map[string]string + Variables map[string]interface{} } type atlasPushClient struct { Client *atlas.Client } -func (c *atlasPushClient) Get(name string) (map[string]string, error) { +func (c *atlasPushClient) Get(name string) (map[string]interface{}, error) { user, name, err := atlas.ParseSlug(name) if err != nil { return nil, err @@ -301,7 +301,7 @@ func (c *atlasPushClient) Get(name string) (map[string]string, error) { return nil, err } - var variables map[string]string + var variables map[string]interface{} if version != nil { variables = version.Variables } @@ -333,7 +333,7 @@ type mockPushClient struct { GetCalled bool GetName string - GetResult map[string]string + GetResult map[string]interface{} GetError error UpsertCalled bool @@ -342,7 +342,7 @@ type mockPushClient struct { UpsertError error } -func (c *mockPushClient) Get(name string) (map[string]string, error) { +func (c *mockPushClient) Get(name string) (map[string]interface{}, error) { c.GetCalled = true c.GetName = name return c.GetResult, c.GetError diff --git a/command/push_test.go b/command/push_test.go index a3d171a61..04dfd7fe5 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -61,7 +61,7 @@ func TestPush_good(t *testing.T) { t.Fatalf("bad: %#v", actual) } - variables := make(map[string]string) + variables := make(map[string]interface{}) if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { t.Fatalf("bad: %#v", client.UpsertOptions) } @@ -115,7 +115,7 @@ func TestPush_input(t *testing.T) { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - variables := map[string]string{ + variables := map[string]interface{}{ "foo": "foo", } if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { @@ -143,7 +143,7 @@ func TestPush_inputPartial(t *testing.T) { client := &mockPushClient{ File: archivePath, - GetResult: map[string]string{"foo": "bar"}, + GetResult: map[string]interface{}{"foo": "bar"}, } ui := new(cli.MockUi) c := &PushCommand{ @@ -170,7 +170,7 @@ func TestPush_inputPartial(t *testing.T) { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - variables := map[string]string{ + variables := map[string]interface{}{ "foo": "bar", "bar": "foo", } @@ -208,7 +208,7 @@ func TestPush_localOverride(t *testing.T) { client := &mockPushClient{File: archivePath} // Provided vars should override existing ones - client.GetResult = map[string]string{ + client.GetResult = map[string]interface{}{ "foo": "old", } ui := new(cli.MockUi) @@ -247,7 +247,7 @@ func TestPush_localOverride(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]string{ + variables := map[string]interface{}{ "foo": "bar", "bar": "foo", } @@ -285,7 +285,7 @@ func TestPush_preferAtlas(t *testing.T) { client := &mockPushClient{File: archivePath} // Provided vars should override existing ones - client.GetResult = map[string]string{ + client.GetResult = map[string]interface{}{ "foo": "old", } ui := new(cli.MockUi) @@ -323,7 +323,7 @@ func TestPush_preferAtlas(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]string{ + variables := map[string]interface{}{ "foo": "old", "bar": "foo", } @@ -394,7 +394,7 @@ func TestPush_tfvars(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]string{ + 
variables := map[string]interface{}{ "foo": "bar", "bar": "foo", } diff --git a/terraform/context.go b/terraform/context.go index 324cbef70..f653aba5e 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -45,7 +45,7 @@ type ContextOpts struct { Providers map[string]ResourceProviderFactory Provisioners map[string]ResourceProvisionerFactory Targets []string - Variables map[string]string + Variables map[string]interface{} UIInput UIInput } @@ -68,7 +68,7 @@ type Context struct { stateLock sync.RWMutex targets []string uiInput UIInput - variables map[string]string + variables map[string]interface{} l sync.Mutex // Lock acquired during any task parallelSem Semaphore @@ -121,7 +121,7 @@ func NewContext(opts *ContextOpts) (*Context, error) { // Setup the variables. We first take the variables given to us. // We then merge in the variables set in the environment. - variables := make(map[string]string) + variables := make(map[string]interface{}) for _, v := range os.Environ() { if !strings.HasPrefix(v, VarEnvPrefix) { continue @@ -506,12 +506,12 @@ func (c *Context) Module() *module.Tree { // Variables will return the mapping of variables that were defined // for this Context. If Input was called, this mapping may be different // than what was given. -func (c *Context) Variables() map[string]string { +func (c *Context) Variables() map[string]interface{} { return c.variables } // SetVariable sets a variable after a context has already been built. -func (c *Context) SetVariable(k, v string) { +func (c *Context) SetVariable(k string, v interface{}) { c.variables[k] = v } diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index ab35ca29d..cc499a482 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -843,7 +843,7 @@ func TestContext2Apply_compute(t *testing.T) { t.Fatalf("err: %s", err) } - ctx.variables = map[string]string{"value": "1"} + ctx.variables = map[string]interface{}{"value": "1"} state, err := ctx.Apply() if err != nil { @@ -1134,7 +1134,7 @@ func TestContext2Apply_mapVariableOverride(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "images.us-west-2": "overridden", }, }) @@ -1510,7 +1510,7 @@ func TestContext2Apply_moduleVarResourceCount(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "count": "2", }, Destroy: true, @@ -1529,7 +1529,7 @@ func TestContext2Apply_moduleVarResourceCount(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "count": "5", }, }) @@ -1623,7 +1623,7 @@ func TestContext2Apply_multiVar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "count": "3", }, }) @@ -1651,7 +1651,7 @@ func TestContext2Apply_multiVar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "count": "1", }, }) @@ -1813,7 +1813,7 @@ func TestContext2Apply_Provisioner_compute(t *testing.T) { Provisioners: map[string]ResourceProvisionerFactory{ "shell": testProvisionerFuncFixed(pr), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ 
"value": "1", }, }) @@ -1937,7 +1937,7 @@ func TestContext2Apply_provisionerFail(t *testing.T) { Provisioners: map[string]ResourceProvisionerFactory{ "shell": testProvisionerFuncFixed(pr), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "value": "1", }, }) @@ -2587,7 +2587,7 @@ func TestContext2Apply_Provisioner_ConnInfo(t *testing.T) { Provisioners: map[string]ResourceProvisionerFactory{ "shell": testProvisionerFuncFixed(pr), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "value": "1", "pass": "test", }, @@ -2813,7 +2813,7 @@ func TestContext2Apply_destroyModuleWithAttrsReferencingResource(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "key_name": "foobarkey", }, }) @@ -4268,7 +4268,7 @@ func TestContext2Apply_vars(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "us-west-2", "amis.us-east-1": "override", }, diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index f953645c8..9791b06fb 100644 --- a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -18,7 +18,7 @@ func TestContext2Input(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "us-west-2", "amis.us-east-1": "override", }, @@ -268,7 +268,7 @@ func TestContext2Input_providerOnly(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "us-west-2", }, UIInput: input, @@ -323,7 +323,7 @@ func TestContext2Input_providerVars(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "bar", }, UIInput: input, @@ -400,7 +400,7 @@ func TestContext2Input_varOnly(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "us-west-2", }, UIInput: input, @@ -455,7 +455,7 @@ func TestContext2Input_varOnlyUnset(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "foovalue", }, UIInput: input, @@ -497,7 +497,7 @@ func TestContext2Input_varWithDefault(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{}, + Variables: map[string]interface{}{}, UIInput: input, }) @@ -543,7 +543,7 @@ func TestContext2Input_varPartiallyComputed(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "foovalue", }, UIInput: input, diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 2bccc40e5..a3a38cc7f 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -47,7 +47,7 @@ func TestContext2Plan_createBefore_maintainRoot(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "in": 
"a,b,c", }, }) @@ -289,7 +289,7 @@ func TestContext2Plan_moduleInputFromVar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "52", }, }) @@ -584,7 +584,7 @@ func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { return p, nil }, }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "root", }, }) @@ -1187,7 +1187,7 @@ func TestContext2Plan_countVar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "count": "3", }, }) @@ -2210,7 +2210,7 @@ func TestContext2Plan_provider(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "bar", }, }) @@ -2265,7 +2265,7 @@ func TestContext2Plan_ignoreChanges(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "ami-1234abcd", }, State: s, diff --git a/terraform/context_validate_test.go b/terraform/context_validate_test.go index 8b47a17eb..3c88fef5d 100644 --- a/terraform/context_validate_test.go +++ b/terraform/context_validate_test.go @@ -307,7 +307,7 @@ func TestContext2Validate_moduleProviderVar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "provider_var": "bar", }, }) @@ -732,7 +732,7 @@ func TestContext2Validate_varRefFilled(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - Variables: map[string]string{ + Variables: map[string]interface{}{ "foo": "bar", }, }) diff --git a/terraform/plan.go b/terraform/plan.go index b2ff008ee..75023a0c6 100644 --- a/terraform/plan.go +++ b/terraform/plan.go @@ -24,7 +24,7 @@ type Plan struct { Diff *Diff Module *module.Tree State *State - Vars map[string]string + Vars map[string]interface{} Targets []string once sync.Once @@ -38,8 +38,13 @@ func (p *Plan) Context(opts *ContextOpts) (*Context, error) { opts.Diff = p.Diff opts.Module = p.Module opts.State = p.State - opts.Variables = p.Vars opts.Targets = p.Targets + + opts.Variables = make(map[string]interface{}) + for k, v := range p.Vars { + opts.Variables[k] = v + } + return NewContext(opts) } @@ -65,7 +70,7 @@ func (p *Plan) init() { } if p.Vars == nil { - p.Vars = make(map[string]string) + p.Vars = make(map[string]interface{}) } }) } diff --git a/terraform/plan_test.go b/terraform/plan_test.go index 8c12b896f..02331558a 100644 --- a/terraform/plan_test.go +++ b/terraform/plan_test.go @@ -50,7 +50,7 @@ func TestReadWritePlan(t *testing.T) { }, }, }, - Vars: map[string]string{ + Vars: map[string]interface{}{ "foo": "bar", }, } diff --git a/terraform/semantics.go b/terraform/semantics.go index 566a3c279..6d001226a 100644 --- a/terraform/semantics.go +++ b/terraform/semantics.go @@ -69,7 +69,7 @@ func (*SemanticCheckModulesExist) Check(g *dag.Graph, v dag.Vertex) error { // smcUserVariables does all the semantic checks to verify that the // variables given satisfy the configuration itself. 
-func smcUserVariables(c *config.Config, vs map[string]string) []error { +func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { var errs []error cvs := make(map[string]*config.Variable) diff --git a/terraform/semantics_test.go b/terraform/semantics_test.go index bebf8f841..4a8d7bd48 100644 --- a/terraform/semantics_test.go +++ b/terraform/semantics_test.go @@ -14,13 +14,13 @@ func TestSMCUserVariables(t *testing.T) { } // Required variables set, optional variables unset - errs = smcUserVariables(c, map[string]string{"foo": "bar"}) + errs = smcUserVariables(c, map[string]interface{}{"foo": "bar"}) if len(errs) != 0 { t.Fatalf("err: %#v", errs) } // Mapping element override - errs = smcUserVariables(c, map[string]string{ + errs = smcUserVariables(c, map[string]interface{}{ "foo": "bar", "map.foo": "baz", }) @@ -29,7 +29,7 @@ func TestSMCUserVariables(t *testing.T) { } // Mapping complete override - errs = smcUserVariables(c, map[string]string{ + errs = smcUserVariables(c, map[string]interface{}{ "foo": "bar", "map": "baz", }) From 3735140286396a7cae7f0928153fb5af87488123 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 18 Jul 2016 13:10:33 -0500 Subject: [PATCH 0323/1238] core: Don't set variables for Atlas until lib is updated --- command/push.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/command/push.go b/command/push.go index 011b0c34f..67cac6740 100644 --- a/command/push.go +++ b/command/push.go @@ -303,7 +303,7 @@ func (c *atlasPushClient) Get(name string) (map[string]interface{}, error) { var variables map[string]interface{} if version != nil { - variables = version.Variables + //variables = version.Variables } return variables, nil @@ -316,7 +316,7 @@ func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) { } data := &atlas.TerraformConfigVersion{ - Variables: opts.Variables, + //Variables: opts.Variables, } version, err := c.Client.CreateTerraformConfigVersion( From 58dd41f3b183e539b96fcb5a483f0a38066d908e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 7 Jul 2016 13:19:41 +0100 Subject: [PATCH 0324/1238] core: Add list() interpolation function The list() interpolation function provides a way to add support for list literals (of strings) to HIL without having to invent new syntax for it and modify the HIL parser. It presents as a function, thus: - list() -> [] - list("a") -> ["a"] - list("a", "b") -> ["a", "b"] Thanks to @wr0ngway for the idea of this approach, fixes #7460. --- config/interpolate_funcs.go | 21 ++++++++ config/interpolate_funcs_test.go | 49 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 5 ++ 3 files changed, 75 insertions(+) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index e912493b6..f73df85e6 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -69,6 +69,7 @@ func Funcs() map[string]ast.Function { "join": interpolationFuncJoin(), "jsonencode": interpolationFuncJSONEncode(), "length": interpolationFuncLength(), + "list": interpolationFuncList(), "lower": interpolationFuncLower(), "md5": interpolationFuncMd5(), "uuid": interpolationFuncUUID(), @@ -83,6 +84,26 @@ func Funcs() map[string]ast.Function { } } +// interpolationFuncList creates a list from the parameters passed +// to it. 
+func interpolationFuncList() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeList, + Variadic: true, + VariadicType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + var outputList []string + + for _, val := range args { + outputList = append(outputList, val.(string)) + } + + return stringSliceToVariableValue(outputList), nil + }, + } +} + // interpolationFuncCompact strips a list of multi-variable values // (e.g. as returned by "split") of any empty strings. func interpolationFuncCompact() ast.Function { diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 46a6eba75..7ee10372f 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -12,6 +12,55 @@ import ( "github.com/hashicorp/hil/ast" ) +func TestInterpolateFuncList(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + // empty input returns empty list + { + `${list()}`, + []interface{}{}, + false, + }, + + // single input returns list of length 1 + { + `${list("hello")}`, + []interface{}{"hello"}, + false, + }, + + // two inputs returns list of length 2 + { + `${list("hello", "world")}`, + []interface{}{"hello", "world"}, + false, + }, + + // not a string input gives error + { + `${list("hello", "${var.list}")}`, + nil, + true, + }, + }, + Vars: map[string]ast.Variable{ + "var.list": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeString, + Value: "Hello", + }, + { + Type: ast.TypeString, + Value: "World", + }, + }, + }, + }, + }) +} + func TestInterpolateFuncCompact(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index c765e2d60..b2f66efd2 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -168,6 +168,11 @@ The supported built-in functions are: * `${length(split(",", "a,b,c"))}` = 3 * `${length("a,b,c")}` = 5 + * `list(items...)` - Returns a list consisting of the arguments to the function. + This function provides a way of representing list literals in interpolation. + * `${list("a", "b", "c")}` returns a list of `"a", "b", "c"`. + * `${list()}` returns an empty list. + * `lookup(map, key [, default])` - Performs a dynamic lookup into a mapping variable. The `map` parameter should be another variable, such as `var.amis`. If `key` does not exist in `map`, the interpolation will From 565733398dac38e05adbbbaecc29ad8781342324 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 19 Jul 2016 04:52:02 +0100 Subject: [PATCH 0325/1238] provider/aws: Support Import of `aws_cloudwatch_metric_alarm` (#7687) ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSCloudWatchMetricAlarm_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSCloudWatchMetricAlarm_ -timeout 120m === RUN TestAccAWSCloudWatchMetricAlarm_importBasic --- PASS: TestAccAWSCloudWatchMetricAlarm_importBasic (17.82s) === RUN TestAccAWSCloudWatchMetricAlarm_basic --- PASS: TestAccAWSCloudWatchMetricAlarm_basic (17.11s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 34.957s ``` --- ...import_aws_cloudwatch_metric_alarm_test.go | 28 +++++++++++++++++++ .../resource_aws_cloudwatch_metric_alarm.go | 3 ++ 2 files changed, 31 insertions(+) create mode 100644 builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go diff --git a/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go b/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go new file mode 100644 index 000000000..bc7a82647 --- /dev/null +++ b/builtin/providers/aws/import_aws_cloudwatch_metric_alarm_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSCloudWatchMetricAlarm_importBasic(t *testing.T) { + resourceName := "aws_cloudwatch_metric_alarm.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSCloudWatchMetricAlarmDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudWatchMetricAlarmConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go index d5b03b8a9..eec4ada19 100644 --- a/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go +++ b/builtin/providers/aws/resource_aws_cloudwatch_metric_alarm.go @@ -16,6 +16,9 @@ func resourceAwsCloudWatchMetricAlarm() *schema.Resource { Read: resourceAwsCloudWatchMetricAlarmRead, Update: resourceAwsCloudWatchMetricAlarmUpdate, Delete: resourceAwsCloudWatchMetricAlarmDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "alarm_name": &schema.Schema{ From 7369c1c9f402a07e0d0cdf510846d04d75f25508 Mon Sep 17 00:00:00 2001 From: avichalbadaya Date: Tue, 19 Jul 2016 04:01:49 -0400 Subject: [PATCH 0326/1238] Allowing ap-south-1 (Mumbai) as valid AWS region (#7688) * Update website_endpoint_url_test.go Allow ap-south-1 (Mumbai) as valid region * Update hosted_zones.go Allowing ap-south-1 (Mumbai) as valid region * Update website_endpoint_url_test.go reformatting * Update hosted_zones.go reformatting * Update resource_aws_s3_bucket.go making changes for ap-south-1 (Mumbai) region --- builtin/providers/aws/hosted_zones.go | 1 + builtin/providers/aws/resource_aws_s3_bucket.go | 2 +- builtin/providers/aws/website_endpoint_url_test.go | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/hosted_zones.go b/builtin/providers/aws/hosted_zones.go index fb95505ea..a99bd22fd 100644 --- a/builtin/providers/aws/hosted_zones.go +++ b/builtin/providers/aws/hosted_zones.go @@ -9,6 +9,7 @@ var hostedZoneIDsMap = map[string]string{ "us-west-1": "Z2F56UZL2M1ACD", "eu-west-1": "Z1BKCTXD74EZPE", "eu-central-1": "Z21DNDUVLTQW6Q", + "ap-south-1": "Z11RGJOFQNVJUP", "ap-southeast-1": "Z3O0J2DXBE1FTB", "ap-southeast-2": "Z1WCIGYICN2BYD", "ap-northeast-1": "Z2M4EHUR26P7ZW", diff --git 
a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 27f8d753a..6897f1e7f 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -1029,7 +1029,7 @@ func WebsiteDomainUrl(region string) string { // Frankfurt(and probably future) regions uses different syntax for website endpoints // http://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - if region == "eu-central-1" { + if region == "eu-central-1" || region == "ap-south-1" { return fmt.Sprintf("s3-website.%s.amazonaws.com", region) } diff --git a/builtin/providers/aws/website_endpoint_url_test.go b/builtin/providers/aws/website_endpoint_url_test.go index 2193ff512..e4cba6312 100644 --- a/builtin/providers/aws/website_endpoint_url_test.go +++ b/builtin/providers/aws/website_endpoint_url_test.go @@ -12,6 +12,7 @@ var websiteEndpoints = []struct { {"us-west-1", "bucket-name.s3-website-us-west-1.amazonaws.com"}, {"eu-west-1", "bucket-name.s3-website-eu-west-1.amazonaws.com"}, {"eu-central-1", "bucket-name.s3-website.eu-central-1.amazonaws.com"}, + {"ap-south-1", "bucket-name.s3-website.ap-south-1.amazonaws.com"}, {"ap-southeast-1", "bucket-name.s3-website-ap-southeast-1.amazonaws.com"}, {"ap-northeast-1", "bucket-name.s3-website-ap-northeast-1.amazonaws.com"}, {"ap-southeast-2", "bucket-name.s3-website-ap-southeast-2.amazonaws.com"}, From fadfea45f491bc15320da310bda933c98d76bffe Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 19 Jul 2016 09:36:24 +0100 Subject: [PATCH 0327/1238] Website: Change the link to AzureRM ScaleSets as the link was throwing a 404 --- website/source/layouts/azurerm.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb index bb0d6137b..9d0f781e4 100644 --- a/website/source/layouts/azurerm.erb +++ b/website/source/layouts/azurerm.erb @@ -192,7 +192,7 @@ > - azurerm_virtual_machine_scale_set + azurerm_virtual_machine_scale_set From e1c3eba14440bb1b763918eb52ca45aaf7149521 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 19 Jul 2016 09:29:05 -0500 Subject: [PATCH 0328/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a21c4b693..789fd97dd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -198,6 +198,7 @@ BUG FIXES: * provider/aws: Safely get ELB values [GH-7585] * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk [GH-6491] * provider/aws: Bump rds_cluster timeout to 15 mins [GH-7604] + * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured [GH-7669] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 2559c19c8d522fdf77b99f8c6b18a892a1a32ce4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 19 Jul 2016 17:22:30 +0100 Subject: [PATCH 0329/1238] Website: Adding an import section to the bottom of the page of importable resources (#7703) * docs/digitalocean: Adding an import section to the bottom of the DO importable resources * docs/azurerm: Adding the Import sections for the AzureRM Importable resources * docs/aws: Adding the import sections to the AWS provider pages 
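Each page touched below gains the same shape of `## Import` section: one sentence naming the identifier the resource is imported by, followed by a `terraform import` command. As a minimal sketch of the workflow these sections document (the queue URL is the illustrative value used on the SQS page; any resource address from your own configuration follows the same pattern):

```
# Pull an existing queue into state under the matching resource block
terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue

# A follow-up plan should report no changes once the configuration
# matches the imported state
terraform plan
```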
--- .../providers/aws/r/simpledb_domain.html.markdown | 8 ++++++++ .../docs/providers/aws/r/sns_topic.html.markdown | 7 +++++++ .../aws/r/sns_topic_subscription.html.markdown | 8 ++++++++ .../docs/providers/aws/r/sqs_queue.html.markdown | 8 ++++++++ .../source/docs/providers/aws/r/subnet.html.markdown | 9 +++++++++ website/source/docs/providers/aws/r/vpc.html.markdown | 8 ++++++++ .../providers/aws/r/vpc_dhcp_options.html.markdown | 9 +++++++++ .../docs/providers/aws/r/vpc_endpoint.html.markdown | 9 +++++++++ .../docs/providers/aws/r/vpc_peering.html.markdown | 8 ++++++++ .../docs/providers/aws/r/vpn_connection.html.markdown | 9 +++++++++ .../docs/providers/aws/r/vpn_gateway.html.markdown | 8 ++++++++ .../azurerm/r/availability_set.html.markdown | 11 ++++++++++- .../docs/providers/azurerm/r/dns_zone.html.markdown | 9 +++++++++ .../azurerm/r/local_network_gateway.html.markdown | 8 ++++++++ .../azurerm/r/network_security_group.html.markdown | 9 +++++++++ .../azurerm/r/network_security_rule.html.markdown | 11 ++++++++++- .../docs/providers/azurerm/r/public_ip.html.markdown | 11 ++++++++++- .../providers/azurerm/r/resource_group.html.markdown | 9 +++++++++ .../azurerm/r/sql_firewall_rule.html.markdown | 11 ++++++++++- .../providers/azurerm/r/storage_account.html.markdown | 9 +++++++++ .../providers/azurerm/r/virtual_network.html.markdown | 9 +++++++++ .../source/docs/providers/do/r/domain.html.markdown | 9 +++++++++ .../source/docs/providers/do/r/droplet.html.markdown | 8 ++++++++ .../docs/providers/do/r/floating_ip.html.markdown | 8 ++++++++ .../source/docs/providers/do/r/ssh_key.html.markdown | 8 ++++++++ website/source/docs/providers/do/r/tag.html.markdown | 9 +++++++++ website/source/docs/providers/do/r/volume.markdown | 11 ++++++++++- 27 files changed, 236 insertions(+), 5 deletions(-) diff --git a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown index 52c324652..e63bae36d 100644 --- a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown +++ b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown @@ -29,3 +29,11 @@ The following arguments are supported: The following attributes are exported: * `id` - The name of the SimpleDB domain + +## Import + +SimpleDB Domains can be imported using the `name`, e.g. + +``` +terraform import aws_simpledb_domain.users users +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sns_topic.html.markdown b/website/source/docs/providers/aws/r/sns_topic.html.markdown index b17d5536f..c0094f2bf 100644 --- a/website/source/docs/providers/aws/r/sns_topic.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic.html.markdown @@ -34,3 +34,10 @@ The following attributes are exported: * `id` - The ARN of the SNS topic * `arn` - The ARN of the SNS topic, as a more obvious property (clone of id) +## Import + +SNS Topics can be imported using the `topic arn`, e.g. 
+ +``` +terraform import aws_sns_topic.user_updates arn:aws:sns:us-west-2:0123456789012:my-topic +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown index ab4ffd594..0bc6e5b89 100644 --- a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown @@ -96,3 +96,11 @@ The following attributes are exported: * `endpoint` - The full endpoint to send data to (SQS ARN, HTTP(S) URL, Application ARN, SMS number, etc.) * `arn` - The ARN of the subscription stored as a more user-friendly property + +## Import + +SNS Topic Subscriptions can be imported using the `subscription arn`, e.g. + +``` +terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sqs_queue.html.markdown b/website/source/docs/providers/aws/r/sqs_queue.html.markdown index 8fab2aa2f..1140c07e1 100644 --- a/website/source/docs/providers/aws/r/sqs_queue.html.markdown +++ b/website/source/docs/providers/aws/r/sqs_queue.html.markdown @@ -40,3 +40,11 @@ The following attributes are exported: * `id` - The URL for the created Amazon SQS queue. * `arn` - The ARN of the SQS queue + +## Import + +SQS Queues can be imported using the `queue url`, e.g. + +``` +terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/subnet.html.markdown b/website/source/docs/providers/aws/r/subnet.html.markdown index 3737ed380..ba96ba77f 100644 --- a/website/source/docs/providers/aws/r/subnet.html.markdown +++ b/website/source/docs/providers/aws/r/subnet.html.markdown @@ -44,3 +44,12 @@ The following attributes are exported: * `cidr_block` - The CIDR block for the subnet. * `vpc_id` - The VPC ID. + + +## Import + +Subnets can be imported using the `subnet id`, e.g. + +``` +terraform import aws_subnet.public_subnet subnet-9d4a7b6c +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index 8cf089b5c..fafcc0097 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -64,3 +64,11 @@ The following attributes are exported: [1]: http://docs.aws.amazon.com/fr_fr/AWSEC2/latest/UserGuide/vpc-classiclink.html + +## Import + +VPNs can be imported using the `vpn id`, e.g. + +``` +terraform import aws_vpn.test_vpn vpc-a01106c2 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown index a890ebdc7..5e030c509 100644 --- a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown @@ -61,3 +61,12 @@ The following attributes are exported: You can find more technical documentation about DHCP Options Set in the official [AWS User Guide](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html). + + +## Import + +VPC DHCP Options can be imported using the `dhcp options id`, e.g. 
+ +``` +terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown index c67c757a5..6bdf31ad8 100644 --- a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown @@ -36,3 +36,12 @@ The following attributes are exported: * `id` - The ID of the VPC endpoint. * `prefix_list_id` - The prefix list ID of the exposed service. + + +## Import + +VPN Endpoints can be imported using the `vpc endpoint id`, e.g. + +``` +terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_peering.html.markdown b/website/source/docs/providers/aws/r/vpc_peering.html.markdown index e8073f3bb..c20b75bcb 100644 --- a/website/source/docs/providers/aws/r/vpc_peering.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_peering.html.markdown @@ -66,3 +66,11 @@ The following attributes are exported: ## Notes If you are not the owner of both VPCs, or do not enable auto_accept you will still have to accept the peering with the AWS Console, aws-cli or aws-sdk-go. + +## Import + +VPC Peering resources can be imported using the `vpc peering id`, e.g. + +``` +terraform import aws_vpc_peering_connection.test_connection pcx-111aaa111 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpn_connection.html.markdown b/website/source/docs/providers/aws/r/vpn_connection.html.markdown index c1f835ab2..4d1dbdf21 100644 --- a/website/source/docs/providers/aws/r/vpn_connection.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_connection.html.markdown @@ -61,3 +61,12 @@ The following attributes are exported: * `tunnel2_preshared_key` - The preshared key of the second VPN tunnel. * `type` - The type of VPN connection. * `vpn_gateway_id` - The ID of the virtual private gateway to which the connection is attached. + + +## Import + +VPN Connections can be imported using the `vpn connection id`, e.g. + +``` +terraform import aws_vpn_connection.testvpnconnection vpn-40f41529 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown index 2dcb2a363..956cfc3c2 100644 --- a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown @@ -36,3 +36,11 @@ The following attributes are exported: * `id` - The ID of the VPN Gateway. + +## Import + +VPN Gateways can be imported using the `vpn gateway id`, e.g. + +``` +terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3 +``` \ No newline at end of file diff --git a/website/source/docs/providers/azurerm/r/availability_set.html.markdown b/website/source/docs/providers/azurerm/r/availability_set.html.markdown index cd60368d0..8e9dc1995 100644 --- a/website/source/docs/providers/azurerm/r/availability_set.html.markdown +++ b/website/source/docs/providers/azurerm/r/availability_set.html.markdown @@ -50,4 +50,13 @@ The following arguments are supported: The following attributes are exported: -* `id` - The virtual AvailabilitySet ID. \ No newline at end of file +* `id` - The virtual AvailabilitySet ID. + + +## Import + +Availability Sets can be imported using the `resource id`, e.g. 
+ +``` +terraform import azurerm_availability_set.group1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/availabilitySets/webAvailSet +``` diff --git a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown b/website/source/docs/providers/azurerm/r/dns_zone.html.markdown index 23aed869c..cc6931f32 100644 --- a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown +++ b/website/source/docs/providers/azurerm/r/dns_zone.html.markdown @@ -39,3 +39,12 @@ The following attributes are exported: * `id` - The DNS Zone ID. * `max_number_of_record_sets` - (Optional) Maximum number of Records in the zone. Defaults to `1000`. * `number_of_record_sets` - (Optional) The number of records already in the zone. + + +## Import + +DNS Zones can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_dns_zone.zone1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown index 1f2bbe0f9..9d9c188b6 100644 --- a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown +++ b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown @@ -46,3 +46,11 @@ The following arguments are supported: The following attributes are exported: * `id` - The local network gateway unique ID within Azure. + +## Import + +Local Network Gateways can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_local_network_gateway.lng1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.network/localnetworkgateways/lng1 +``` diff --git a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown index 019b10e8f..498f90872 100644 --- a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown +++ b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown @@ -88,3 +88,12 @@ The `security_rule` block supports: The following attributes are exported: * `id` - The Network Security Group ID. + + +## Import + +Network Security Groups can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_network_security_group.group1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup +``` diff --git a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown b/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown index 061175fa4..3b5e58d10 100644 --- a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown @@ -72,4 +72,13 @@ The following arguments are supported: The following attributes are exported: -* `id` - The Network Security Rule ID. \ No newline at end of file +* `id` - The Network Security Rule ID. + + +## Import + +Network Security Rules can be imported using the `resource id`, e.g. 
+ +``` +terraform import azurerm_network_security_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/azurerm/r/public_ip.html.markdown b/website/source/docs/providers/azurerm/r/public_ip.html.markdown index 5dd1714ab..85f77e3c0 100644 --- a/website/source/docs/providers/azurerm/r/public_ip.html.markdown +++ b/website/source/docs/providers/azurerm/r/public_ip.html.markdown @@ -58,4 +58,13 @@ The following attributes are exported: * `id` - The Public IP ID. * `ip_address` - The IP address value that was allocated. -* `fqdn` - Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone \ No newline at end of file +* `fqdn` - Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone + + +## Import + +Public IPs can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_public_ip.myPublicIp /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/publicIPAddresses/myPublicIpAddress1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/azurerm/r/resource_group.html.markdown b/website/source/docs/providers/azurerm/r/resource_group.html.markdown index 0ba13feed..0c51cba96 100644 --- a/website/source/docs/providers/azurerm/r/resource_group.html.markdown +++ b/website/source/docs/providers/azurerm/r/resource_group.html.markdown @@ -40,3 +40,12 @@ The following arguments are supported: The following attributes are exported: * `id` - The resource group ID. + + +## Import + +Resource Groups can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_resource_group.mygroup /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup +``` diff --git a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown b/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown index 90a50332b..828e046f1 100644 --- a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown +++ b/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown @@ -55,4 +55,13 @@ The following arguments are supported: The following attributes are exported: -* `id` - The SQL Firewall Rule ID. \ No newline at end of file +* `id` - The SQL Firewall Rule ID. + +## Import + +SQL Firewall Rules can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_sql_firewall_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/firewallRules/rule1 +``` + diff --git a/website/source/docs/providers/azurerm/r/storage_account.html.markdown b/website/source/docs/providers/azurerm/r/storage_account.html.markdown index 5bc4d0a5d..8e470365a 100644 --- a/website/source/docs/providers/azurerm/r/storage_account.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_account.html.markdown @@ -72,3 +72,12 @@ The following attributes are exported in addition to the arguments listed above: * `primary_file_endpoint` - The endpoint URL for file storage in the primary location. 
* `primary_access_key` - The primary access key for the storage account * `secondary_access_key` - The secondary access key for the storage account + +## Import + +Virtual Networks can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_storage_account.storageAcc1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Storage/storageAccounts/myaccount +``` + diff --git a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown index 6d75cead6..4e8b87175 100644 --- a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown @@ -80,3 +80,12 @@ The `subnet` block supports: The following attributes are exported: * `id` - The virtual NetworkConfiguration ID. + + +## Import + +Virtual Networks can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_virtual_network.testNetwork /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/do/r/domain.html.markdown b/website/source/docs/providers/do/r/domain.html.markdown index 5580b63d2..1cc3c45bd 100644 --- a/website/source/docs/providers/do/r/domain.html.markdown +++ b/website/source/docs/providers/do/r/domain.html.markdown @@ -35,3 +35,12 @@ The following attributes are exported: * `id` - The name of the domain + + +## Import + +Domains can be imported using the `domain name`, e.g. + +``` +terraform import digitalocean_domain.mydomain mytestdomain.com +``` \ No newline at end of file diff --git a/website/source/docs/providers/do/r/droplet.html.markdown b/website/source/docs/providers/do/r/droplet.html.markdown index 8265e6803..7c98a9731 100644 --- a/website/source/docs/providers/do/r/droplet.html.markdown +++ b/website/source/docs/providers/do/r/droplet.html.markdown @@ -70,3 +70,11 @@ The following attributes are exported: * `status` - The status of the droplet * `tags` - The tags associated with the droplet * `volume_ids` - A list of the attached block storage volumes + +## Import + +Droplets can be imported using the droplet `id`, e.g. + +``` +terraform import digitalocean_droplet.mydroplet 100823 +``` diff --git a/website/source/docs/providers/do/r/floating_ip.html.markdown b/website/source/docs/providers/do/r/floating_ip.html.markdown index a91644369..7f32932b0 100644 --- a/website/source/docs/providers/do/r/floating_ip.html.markdown +++ b/website/source/docs/providers/do/r/floating_ip.html.markdown @@ -42,3 +42,11 @@ The following arguments are supported: The following attributes are exported: * `ip_address` - The IP Address of the resource + +## Import + +Floating IPs can be imported using the `ip`, e.g. + +``` +terraform import digitalocean_floating_ip.myip 192.168.0.1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/do/r/ssh_key.html.markdown b/website/source/docs/providers/do/r/ssh_key.html.markdown index 7a8033519..a97ded95e 100644 --- a/website/source/docs/providers/do/r/ssh_key.html.markdown +++ b/website/source/docs/providers/do/r/ssh_key.html.markdown @@ -39,3 +39,11 @@ The following attributes are exported: * `name` - The name of the SSH key * `public_key` - The text of the public key * `fingerprint` - The fingerprint of the SSH key + +## Import + +SSH Keys can be imported using the `ssh key id`, e.g. 
+ +``` +terraform import digitalocean_ssh_key.mykey 263654 +``` \ No newline at end of file diff --git a/website/source/docs/providers/do/r/tag.html.markdown b/website/source/docs/providers/do/r/tag.html.markdown index 02e0a9694..4e25daa3f 100644 --- a/website/source/docs/providers/do/r/tag.html.markdown +++ b/website/source/docs/providers/do/r/tag.html.markdown @@ -43,3 +43,12 @@ The following attributes are exported: * `id` - The name of the tag * `name` - The name of the tag + + +## Import + +Tags can be imported using the `name`, e.g. + +``` +terraform import digitalocean_tag.mytag tagname +``` \ No newline at end of file diff --git a/website/source/docs/providers/do/r/volume.markdown b/website/source/docs/providers/do/r/volume.markdown index d14416eb6..e8441caa7 100644 --- a/website/source/docs/providers/do/r/volume.markdown +++ b/website/source/docs/providers/do/r/volume.markdown @@ -42,4 +42,13 @@ The following arguments are supported: The following attributes are exported: -* `id` - The unique identifier for the block storage volume. \ No newline at end of file +* `id` - The unique identifier for the block storage volume. + + +## Import + +Volumes can be imported using the `volume id`, e.g. + +``` +terraform import digitalocean_volume.volumea 506f78a4-e098-11e5-ad9f-000f53306ae1 +``` \ No newline at end of file From 2bd7cfd5fe7b9e3cccbef8b96fd49f59e419cc7b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 19 Jul 2016 13:39:40 -0400 Subject: [PATCH 0330/1238] Expand list interpolation to lists and maps Allow lists and maps within the list interpolation function via variable interpolation. Since this requires setting the variadic type to TypeAny, we check for non-heterogeneous lists in the callback. --- config/interpolate_funcs.go | 29 ++++++++-- config/interpolate_funcs_test.go | 55 ++++++++++++++++++- .../docs/configuration/interpolation.html.md | 2 +- 3 files changed, 79 insertions(+), 7 deletions(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index f73df85e6..a3cdf828b 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -91,15 +91,34 @@ func interpolationFuncList() ast.Function { ArgTypes: []ast.Type{}, ReturnType: ast.TypeList, Variadic: true, - VariadicType: ast.TypeString, + VariadicType: ast.TypeAny, Callback: func(args []interface{}) (interface{}, error) { - var outputList []string + var outputList []ast.Variable - for _, val := range args { - outputList = append(outputList, val.(string)) + for i, val := range args { + switch v := val.(type) { + case string: + outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: v}) + case []ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeList, Value: v}) + case map[string]ast.Variable: + outputList = append(outputList, ast.Variable{Type: ast.TypeMap, Value: v}) + default: + return nil, fmt.Errorf("unexpected type %T for argument %d in list", v, i) + } } - return stringSliceToVariableValue(outputList), nil + // we don't support heterogeneous types, so make sure all types match the first + if len(outputList) > 0 { + firstType := outputList[0].Type + for i, v := range outputList[1:] { + if v.Type != firstType { + return nil, fmt.Errorf("unexpected type %s for argument %d in list", v.Type, i+1) + } + } + } + + return outputList, nil }, } } diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 7ee10372f..5b9800c14 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -38,7 
+38,28 @@ func TestInterpolateFuncList(t *testing.T) { // not a string input gives error { - `${list("hello", "${var.list}")}`, + `${list("hello", 42)}`, + nil, + true, + }, + + // list of lists + { + `${list("${var.list}", "${var.list2}")}`, + []interface{}{[]interface{}{"Hello", "World"}, []interface{}{"bar", "baz"}}, + false, + }, + + // list of maps + { + `${list("${var.map}", "${var.map2}")}`, + []interface{}{map[string]interface{}{"key": "bar"}, map[string]interface{}{"key2": "baz"}}, + false, + }, + + // error on a heterogeneous list + { + `${list("first", "${var.list}")}`, nil, true, }, @@ -57,6 +78,38 @@ func TestInterpolateFuncList(t *testing.T) { }, }, }, + "var.list2": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeString, + Value: "bar", + }, + { + Type: ast.TypeString, + Value: "baz", + }, + }, + }, + + "var.map": { + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "key": { + Type: ast.TypeString, + Value: "bar", + }, + }, + }, + "var.map2": { + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "key2": { + Type: ast.TypeString, + Value: "baz", + }, + }, + }, }, }) } diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index b2f66efd2..338550377 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -172,7 +172,7 @@ The supported built-in functions are: This function provides a way of representing list literals in interpolation. * `${list("a", "b", "c")}` returns a list of `"a", "b", "c"`. * `${list()}` returns an empty list. - + * `lookup(map, key [, default])` - Performs a dynamic lookup into a mapping variable. The `map` parameter should be another variable, such as `var.amis`. If `key` does not exist in `map`, the interpolation will From 8dcbc0b0a0d6b985c9fa4c096c9f9214cacdfa7b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 19 Jul 2016 17:21:14 -0400 Subject: [PATCH 0331/1238] Add concat to accept lists of lists and maps This will allow the concat interpolation function to accept lists of lists, and lists of maps as well as strings. We still allow bare strings for backwards compatibility, but remove some of the old comment wording as it could cause confusion of this function with actual string concatenation. Since maps are now supported in the config, this removes the superfluous (and failing) TestInterpolationFuncConcatListOfMaps. --- config/interpolate_funcs.go | 51 ++++++++------ config/interpolate_funcs_test.go | 113 +++++++++++++++++++++---------- 2 files changed, 107 insertions(+), 57 deletions(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index a3cdf828b..91aa66da7 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -258,10 +258,8 @@ func interpolationFuncCoalesce() ast.Function { } } -// interpolationFuncConcat implements the "concat" function that -// concatenates multiple strings. This isn't actually necessary anymore -// since our language supports string concat natively, but for backwards -// compat we do this. +// interpolationFuncConcat implements the "concat" function that concatenates +// multiple lists. 
func interpolationFuncConcat() ast.Function { return ast.Function{ ArgTypes: []ast.Type{ast.TypeAny}, @@ -269,33 +267,42 @@ func interpolationFuncConcat() ast.Function { Variadic: true, VariadicType: ast.TypeAny, Callback: func(args []interface{}) (interface{}, error) { - var finalListElements []string + var outputList []ast.Variable for _, arg := range args { - // Append strings for backward compatibility - if argument, ok := arg.(string); ok { - finalListElements = append(finalListElements, argument) - continue - } - - // Otherwise variables - if argument, ok := arg.([]ast.Variable); ok { - for _, element := range argument { - t := element.Type - switch t { + switch arg := arg.(type) { + case string: + outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: arg}) + case []ast.Variable: + for _, v := range arg { + switch v.Type { case ast.TypeString: - finalListElements = append(finalListElements, element.Value.(string)) + outputList = append(outputList, v) + case ast.TypeList: + outputList = append(outputList, v) + case ast.TypeMap: + outputList = append(outputList, v) default: - return nil, fmt.Errorf("concat() does not support lists of %s", t.Printable()) + return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable()) } } - continue - } - return nil, fmt.Errorf("arguments to concat() must be a string or list of strings") + default: + return nil, fmt.Errorf("concat() does not support %T", arg) + } } - return stringSliceToVariableValue(finalListElements), nil + // we don't support heterogeneous types, so make sure all types match the first + if len(outputList) > 0 { + firstType := outputList[0].Type + for _, v := range outputList[1:] { + if v.Type != firstType { + return nil, fmt.Errorf("unexpected %s in list of %s", v.Type.Printable(), firstType.Printable()) + } + } + } + + return outputList, nil }, } } diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 5b9800c14..541bcffab 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -5,7 +5,6 @@ import ( "io/ioutil" "os" "reflect" - "strings" "testing" "github.com/hashicorp/hil" @@ -325,44 +324,88 @@ func TestInterpolateFuncConcat(t *testing.T) { []interface{}{"a", "b", "c", "d", "e", "f", "0", "1"}, false, }, + + // list vars + { + `${concat("${var.list}", "${var.list}")}`, + []interface{}{"a", "b", "a", "b"}, + false, + }, + // lists of lists + { + `${concat("${var.lists}", "${var.lists}")}`, + []interface{}{[]interface{}{"c", "d"}, []interface{}{"c", "d"}}, + false, + }, + + // lists of maps + { + `${concat("${var.maps}", "${var.maps}")}`, + []interface{}{map[string]interface{}{"key1": "a", "key2": "b"}, map[string]interface{}{"key1": "a", "key2": "b"}}, + false, + }, + + // mismatched types + { + `${concat("${var.lists}", "${var.maps}")}`, + nil, + true, + }, + }, + Vars: map[string]ast.Variable{ + "var.list": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeString, + Value: "a", + }, + { + Type: ast.TypeString, + Value: "b", + }, + }, + }, + "var.lists": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeString, + Value: "c", + }, + { + Type: ast.TypeString, + Value: "d", + }, + }, + }, + }, + }, + "var.maps": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "key1": { + Type: ast.TypeString, + Value: "a", + }, + "key2": { + Type: ast.TypeString, + Value: "b", + }, + }, + }, + }, + 
}, }, }) } -// TODO: This test is split out and calls a private function -// because there's no good way to get a list of maps into the unit -// tests due to GH-7142 - once lists of maps can be expressed properly as -// literals this unit test can be wrapped back into the suite above. -// -// Reproduces crash reported in GH-7030. -func TestInterpolationFuncConcatListOfMaps(t *testing.T) { - listOfMapsOne := ast.Variable{ - Type: ast.TypeList, - Value: []ast.Variable{ - { - Type: ast.TypeMap, - Value: map[string]interface{}{"one": "foo"}, - }, - }, - } - listOfMapsTwo := ast.Variable{ - Type: ast.TypeList, - Value: []ast.Variable{ - { - Type: ast.TypeMap, - Value: map[string]interface{}{"two": "bar"}, - }, - }, - } - args := []interface{}{listOfMapsOne.Value, listOfMapsTwo.Value} - - _, err := interpolationFuncConcat().Callback(args) - - if err == nil || !strings.Contains(err.Error(), "concat() does not support lists of type map") { - t.Fatalf("Expected err, got: %v", err) - } -} - func TestInterpolateFuncDistinct(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ From af26350f49b392b5adaf0132646b1f599c816ba6 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Wed, 20 Jul 2016 08:24:32 -0600 Subject: [PATCH 0332/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 789fd97dd..4202cebd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -224,6 +224,7 @@ BUG FIXES: * provider/openstack: Rebuild Instances On Network Changes [GH-6844] * provider/openstack: Firewall rules are applied in the correct order [GH-7194] * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups [GH-7468] + * provider/openstack: Fixing boot volumes interfering with block storage volumes list [GH-7649] * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources [GH-6522] * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` [GH-6635] * provider/vsphere: adding a `vsphere_virtual_machine` migration [GH-7023] From f529cae42abae9a0da05590bef234ec19ab875cf Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Wed, 20 Jul 2016 09:37:17 -0600 Subject: [PATCH 0333/1238] provider/openstack: Documenting resources that support importing (#7716) Also removing some unneeded region arguments in the examples. 
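The identifier expected by `terraform import` differs per resource: most of the OpenStack resources documented below import by UUID, while keypairs import by name. A short sketch using two examples taken from the pages below:

```
# Most OpenStack resources import by their UUID
terraform import openstack_networking_network_v2.network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9

# Keypairs import by key name instead
terraform import openstack_compute_keypair_v2.my-keypair test-keypair
```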
--- .../openstack/r/blockstorage_volume_v1.html.markdown | 8 ++++++++ .../openstack/r/blockstorage_volume_v2.html.markdown | 8 ++++++++ .../openstack/r/compute_floatingip_v2.html.markdown | 9 ++++++++- .../openstack/r/compute_keypair_v2.html.markdown | 8 ++++++++ .../openstack/r/compute_secgroup_v2.html.markdown | 8 ++++++++ .../openstack/r/compute_servergroup_v2.html.markdown | 8 ++++++++ .../providers/openstack/r/fw_firewall_v1.html.markdown | 10 ++++++++-- .../providers/openstack/r/fw_policy_v1.html.markdown | 9 ++++++++- .../providers/openstack/r/fw_rule_v1.html.markdown | 8 ++++++++ .../providers/openstack/r/lb_member_v1.html.markdown | 8 ++++++++ .../providers/openstack/r/lb_monitor_v1.html.markdown | 8 ++++++++ .../providers/openstack/r/lb_pool_v1.html.markdown | 8 ++++++++ .../docs/providers/openstack/r/lb_vip_v1.html.markdown | 8 ++++++++ .../openstack/r/networking_floatingip_v2.html.markdown | 9 ++++++++- .../openstack/r/networking_network_v2.html.markdown | 8 ++++++++ .../openstack/r/networking_port_v2.html.markdown | 8 ++++++++ .../r/networking_router_interface_v2.html.markdown | 2 -- .../openstack/r/networking_router_v2.html.markdown | 1 - .../r/networking_secgroup_rule_v2.html.markdown | 8 ++++++++ .../openstack/r/networking_secgroup_v2.html.markdown | 8 ++++++++ .../openstack/r/networking_subnet_v2.html.markdown | 8 ++++++++ 21 files changed, 152 insertions(+), 8 deletions(-) diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown index 70ab709a0..bc6e1cd9a 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown @@ -73,3 +73,11 @@ The following attributes are exported: * `attachment` - If a volume is attached to an instance, this attribute will display the Attachment ID, Instance ID, and the Device as the Instance sees it. + +## Import + +Volumes can be imported using the `id`, e.g. + +``` +terraform import openstack_blockstorage_volume_v1.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d +``` diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown index edbe592f7..dee3a4f0a 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown @@ -78,3 +78,11 @@ The following attributes are exported: * `attachment` - If a volume is attached to an instance, this attribute will display the Attachment ID, Instance ID, and the Device as the Instance sees it. + +## Import + +Volumes can be imported using the `id`, e.g. + +``` +terraform import openstack_blockstorage_volume_v2.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d +``` diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown index ce7638800..fc4359b22 100644 --- a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown @@ -17,7 +17,6 @@ but only networking floating IPs can be used with load balancers. 
``` resource "openstack_compute_floatingip_v2" "floatip_1" { - region = "" pool = "public" } ``` @@ -44,3 +43,11 @@ The following attributes are exported: * `address` - The actual floating IP address itself. * `fixed_ip` - The fixed IP address corresponding to the floating IP. * `instance_id` - UUID of the compute instance associated with the floating IP. + +## Import + +Floating IPs can be imported using the `id`, e.g. + +``` +terraform import openstack_compute_floatingip_v2.floatip_1 89c60255-9bd6-460c-822a-e2b959ede9d2 +``` diff --git a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown index 0c3beae27..991fdfb8f 100644 --- a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown @@ -41,3 +41,11 @@ The following attributes are exported: * `region` - See Argument Reference above. * `name` - See Argument Reference above. * `public_key` - See Argument Reference above. + +## Import + +Keypairs can be imported using the `name`, e.g. + +``` +terraform import openstack_compute_keypair_v2.my-keypair test-keypair +``` diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown index 2005c9aea..a9c1c8385 100644 --- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown @@ -114,3 +114,11 @@ resource "openstack_compute_instance_v2" "test-server" { security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.name}"] } ``` + +## Import + +Security Groups can be imported using the `id`, e.g. + +``` +terraform import openstack_compute_secgroup_v2.my_secgroup 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf +``` diff --git a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown index 0e64db81f..f3af0a5f6 100644 --- a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown @@ -51,3 +51,11 @@ The following attributes are exported: * `name` - See Argument Reference above. * `policies` - See Argument Reference above. * `members` - The instances that are part of this server group. + +## Import + +Server Groups can be imported using the `id`, e.g. + +``` +terraform import openstack_compute_servergroup_v2.test-sg 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf +``` diff --git a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown index 07a1aa683..d3a1e9739 100644 --- a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown @@ -32,14 +32,12 @@ resource "openstack_fw_rule_v1" "rule_2" { } resource "openstack_fw_policy_v1" "policy_1" { - region = "" name = "my-policy" rules = ["${openstack_fw_rule_v1.rule_1.id}", "${openstack_fw_rule_v1.rule_2.id}"] } resource "openstack_fw_firewall_v1" "firewall_1" { - region = "" name = "my-firewall" policy_id = "${openstack_fw_policy_v1.policy_1.id}" } @@ -81,3 +79,11 @@ The following attributes are exported: * `description` - See Argument Reference above. 
* `admin_state_up` - See Argument Reference above. * `tenant_id` - See Argument Reference above. + +## Import + +Firewalls can be imported using the `id`, e.g. + +``` +terraform import openstack_fw_firewall_v1.firewall_1 c9e39fb2-ce20-46c8-a964-25f3898c7a97 +``` diff --git a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown index 4c6b52e26..e8be6f4c2 100644 --- a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown @@ -32,7 +32,6 @@ resource "openstack_fw_rule_v1" "rule_2" { } resource "openstack_fw_policy_v1" "policy_1" { - region = "" name = "my-policy" rules = ["${openstack_fw_rule_v1.rule_1.id}", "${openstack_fw_rule_v1.rule_2.id}"] @@ -78,3 +77,11 @@ The following attributes are exported: * `description` - See Argument Reference above. * `audited` - See Argument Reference above. * `shared` - See Argument Reference above. + +## Import + +Firewall Policies can be imported using the `id`, e.g. + +``` +terraform import openstack_fw_policy_v1.policy_1 07f422e6-c596-474b-8b94-fe2c12506ce0 +``` diff --git a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown index 22156d763..a6c55f96b 100644 --- a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown @@ -88,3 +88,11 @@ The following attributes are exported: * `destination_port` - See Argument Reference above. * `enabled` - See Argument Reference above. * `tenant_id` - See Argument Reference above. + +## Import + +Firewall Rules can be imported using the `id`, e.g. + +``` +terraform import openstack_fw_rule_v1.rule_1 8dbc0c28-e49c-463f-b712-5c5d1bbac327 +``` diff --git a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown index e4d1f303c..d11366d6a 100644 --- a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown @@ -56,3 +56,11 @@ The following attributes are exported: * `admin_state_up` - See Argument Reference above. * `weight` - The load balancing weight of the member. This is currently unable to be set through Terraform. + +## Import + +Load Balancer Members can be imported using the `id`, e.g. + +``` +terraform import openstack_lb_member_v1.member_1 a7498676-4fe4-4243-a864-2eaaf18c73df +``` diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown index cbf6b2b87..e210ad3d9 100644 --- a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown @@ -80,3 +80,11 @@ The following attributes are exported: * `expected_codes` - See Argument Reference above. * `admin_state_up` - See Argument Reference above. * `tenant_id` - See Argument Reference above. + +## Import + +Load Balancer Members can be imported using the `id`, e.g. 
+ +``` +terraform import openstack_lb_monitor_v1.monitor_1 119d7530-72e9-449a-aa97-124a5ef1992c +``` diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown index c1439421d..cab6e9d5b 100644 --- a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown @@ -178,3 +178,11 @@ The following attributes are exported: ## Notes The `member` block is deprecated in favor of the `openstack_lb_member_v1` resource. + +## Import + +Load Balancer Pools can be imported using the `id`, e.g. + +``` +terraform import openstack_lb_pool_v1.pool_1 b255e6ba-02ad-43e6-8951-3428ca26b713 +``` diff --git a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown index 178806be7..8e1d64561 100644 --- a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown @@ -98,3 +98,11 @@ The following attributes are exported: * `floating_ip` - See Argument Reference above. * `admin_state_up` - See Argument Reference above. * `port_id` - Port UUID for this VIP at associated floating IP (if any). + +## Import + +Load Balancer VIPs can be imported using the `id`, e.g. + +``` +terraform import openstack_lb_vip_v1.vip_1 50e16b26-89c1-475e-a492-76167182511e +``` diff --git a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown index 2813c815e..6106f2d19 100644 --- a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown @@ -17,7 +17,6 @@ but only compute floating IPs can be used with compute instances. ``` resource "openstack_networking_floatingip_v2" "floatip_1" { - region = "" pool = "public" } ``` @@ -56,3 +55,11 @@ The following attributes are exported: * `port_id` - ID of associated port. * `tenant_id` - the ID of the tenant in which to create the floating IP. * `fixed_ip` - The fixed IP which the floating IP maps to. + +## Import + +Floating IPs can be imported using the `id`, e.g. + +``` +terraform import openstack_networking_floatingip_v2.floatip_1 2c7f39f3-702b-48d1-940c-b50384177ee1 +``` diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index 27c3b4868..5ce7dcec5 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -91,3 +91,11 @@ The following attributes are exported: * `shared` - See Argument Reference above. * `tenant_id` - See Argument Reference above. * `admin_state_up` - See Argument Reference above. + +## Import + +Networks can be imported using the `id`, e.g. 
+ +``` +terraform import openstack_networking_network_v2.network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9 +``` diff --git a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown index 6215951b0..4b8134b38 100644 --- a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown @@ -86,6 +86,14 @@ The following attributes are exported: * `device_id` - See Argument Reference above. * `fixed_ip/ip_address` - See Argument Reference above. +## Import + +Ports can be imported using the `id`, e.g. + +``` +terraform import openstack_networking_port_v2.port_1 eae26a3e-1c33-4cc1-9c31-0cd729c438a1 +``` + ## Notes ### Ports and Instances diff --git a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown index 13046d64d..80c698ae2 100644 --- a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown @@ -25,13 +25,11 @@ resource "openstack_networking_subnet_v2" "subnet_1" { } resource "openstack_networking_router_v2" "router_1" { - region = "" name = "my_router" external_gateway = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f" } resource "openstack_networking_router_interface_v2" "router_interface_1" { - region = "" router_id = "${openstack_networking_router_v2.router_1.id}" subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" } diff --git a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown index 5540adb62..7c4e36a7e 100644 --- a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown @@ -14,7 +14,6 @@ Manages a V2 router resource within OpenStack. ``` resource "openstack_networking_router_v2" "router_1" { - region = "" name = "my_router" external_gateway = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f" } diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown index e80ac6cf1..b0c4e7b3f 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown @@ -87,3 +87,11 @@ The following attributes are exported: * `remote_group_id` - See Argument Reference above. * `security_group_id` - See Argument Reference above. * `tenant_id` - See Argument Reference above. + +## Import + +Security Group Rules can be imported using the `id`, e.g. + +``` +terraform import openstack_networking_secgroup_rule_v2.secgroup_rule_1 aeb68ee3-6e9d-4256-955c-9584a6212745 +``` diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown index ca49e4b66..b964c2934 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown @@ -48,3 +48,11 @@ The following attributes are exported: * `name` - See Argument Reference above. 
* `description` - See Argument Reference above. * `tenant_id` - See Argument Reference above. + +## Import + +Security Groups can be imported using the `id`, e.g. + +``` +terraform import openstack_networking_secgroup_v2.secgroup_1 38809219-5e8a-4852-9139-6f461c90e8bc +``` diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown index d32d3d6cd..a990d2e96 100644 --- a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown @@ -100,3 +100,11 @@ The following attributes are exported: * `enable_dhcp` - See Argument Reference above. * `dns_nameservers` - See Argument Reference above. * `host_routes` - See Argument Reference above. + +## Import + +Subnets can be imported using the `id`, e.g. + +``` +terraform import openstack_networking_subnet_v2.subnet_1 da4faf16-5546-41e4-8330-4d0002b74048 +``` From 0edf83008c11e5aca44d3ed22e4280dc7fad3908 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 17:00:57 +0100 Subject: [PATCH 0334/1238] provider/aws: Support `task_role_arn` on `aws_ecs_task_definition` (#7653) Fixes #7633 --- .../aws/resource_aws_ecs_task_definition.go | 11 +++ .../resource_aws_ecs_task_definition_test.go | 79 +++++++++++++++++++ 2 files changed, 90 insertions(+) diff --git a/builtin/providers/aws/resource_aws_ecs_task_definition.go b/builtin/providers/aws/resource_aws_ecs_task_definition.go index b9b40d6bf..117bbcdf2 100644 --- a/builtin/providers/aws/resource_aws_ecs_task_definition.go +++ b/builtin/providers/aws/resource_aws_ecs_task_definition.go @@ -46,6 +46,12 @@ func resourceAwsEcsTaskDefinition() *schema.Resource { }, }, + "task_role_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume": &schema.Schema{ Type: schema.TypeSet, Optional: true, @@ -83,6 +89,10 @@ func resourceAwsEcsTaskDefinitionCreate(d *schema.ResourceData, meta interface{} Family: aws.String(d.Get("family").(string)), } + if v, ok := d.GetOk("task_role_arn"); ok { + input.TaskRoleArn = aws.String(v.(string)) + } + if v, ok := d.GetOk("volume"); ok { volumes, err := expandEcsVolumes(v.(*schema.Set).List()) if err != nil { @@ -127,6 +137,7 @@ func resourceAwsEcsTaskDefinitionRead(d *schema.ResourceData, meta interface{}) d.Set("family", *taskDefinition.Family) d.Set("revision", *taskDefinition.Revision) d.Set("container_definitions", taskDefinition.ContainerDefinitions) + d.Set("task_role_arn", taskDefinition.TaskRoleArn) d.Set("volumes", flattenEcsVolumes(taskDefinition.Volumes)) return nil diff --git a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go b/builtin/providers/aws/resource_aws_ecs_task_definition_test.go index 9972bba7b..d075a429a 100644 --- a/builtin/providers/aws/resource_aws_ecs_task_definition_test.go +++ b/builtin/providers/aws/resource_aws_ecs_task_definition_test.go @@ -74,6 +74,22 @@ func TestAccAWSEcsTaskDefinition_withEcsService(t *testing.T) { }) } +func TestAccAWSEcsTaskDefinition_withTaskRoleArn(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSEcsTaskDefinitionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSEcsTaskDefinitionWithTaskRoleArn, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSEcsTaskDefinitionExists("aws_ecs_task_definition.sleep"), + ), + 
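On the configuration side, the new argument is a plain optional string on the task definition. A minimal sketch of how it might be consumed — the family and the container definition file are placeholders, while the IAM role reference mirrors the `role_test` role created in the accompanying acceptance test:

```
resource "aws_ecs_task_definition" "sleep" {
  family                = "terraform-example"
  container_definitions = "${file("task-definitions/sleep.json")}"

  # Optional; the schema marks this ForceNew, so changing the ARN
  # registers a new task definition rather than updating in place.
  task_role_arn = "${aws_iam_role.role_test.arn}"
}
```

Because the Read function now calls `d.Set("task_role_arn", taskDefinition.TaskRoleArn)`, a refreshed resource reports whatever role is actually attached in ECS.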
}, + }, + }) +} + func testAccCheckAWSEcsTaskDefinitionDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).ecsconn @@ -182,6 +198,69 @@ TASK_DEFINITION } ` +var testAccAWSEcsTaskDefinitionWithTaskRoleArn = ` +resource "aws_iam_role" "role_test" { + name = "tf_old_name" + path = "/test/" + assume_role_policy = < Date: Wed, 20 Jul 2016 17:01:35 +0100 Subject: [PATCH 0335/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4202cebd7..c34f5ecf7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -111,6 +111,7 @@ IMPROVEMENTS: * provider/aws: Retry creation of IAM role depending on new IAM user [GH-7324] * provider/aws: Allow `port` on `aws_db_instance` to be updated [GH-7441] * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs [GH-7470] + * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition [GH-7653] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 87a5ce8045897a3fa2a15db779c1dc048e05aef7 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 20 Jul 2016 13:45:04 -0400 Subject: [PATCH 0336/1238] Add tests for maps with dots This adds some unit tests for config maps with dots in the key values. We check for maps with keys which have overlapping names. There are however still issues with nested maps which create overlapping flattened names, as well as nested lists with dots in the key. --- terraform/resource_test.go | 94 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/terraform/resource_test.go b/terraform/resource_test.go index 38e066f5f..dba1636a2 100644 --- a/terraform/resource_test.go +++ b/terraform/resource_test.go @@ -88,6 +88,100 @@ func TestResourceConfigGet(t *testing.T) { Key: "foo.5", Value: nil, }, + + // get from map + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{"key": 1}, + }, + }, + Key: "mapname.0.key", + Value: 1, + }, + + // get from map with dot in key + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{"key.name": 1}, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + + // get from map with overlapping key names + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{ + "key.name": 1, + "key.name.2": 2, + }, + }, + }, + Key: "mapname.0.key.name.2", + Value: 2, + }, + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{ + "key.name": 1, + "key.name.foo": 2, + }, + }, + }, + Key: "mapname.0.key.name", + Value: 1, + }, + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{ + "listkey": []map[string]interface{}{ + {"key": 3}, + }, + }, + }, + }, + Key: "mapname.0.listkey.0.key", + Value: 3, + }, + // FIXME: this is ambiguous, and matches the nested map + // leaving here to catch this behaviour if it changes. + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{ + "key.name": 1, + "key.name.0": 2, + "key": map[string]interface{}{"name": 3}, + }, + }, + }, + Key: "mapname.0.key.name", + Value: 3, + }, + /* + // TODO: can't access this nested list at all. 
+ // FIXME: key with name matching substring of nested list can panic + { + Config: map[string]interface{}{ + "mapname": []map[string]interface{}{ + map[string]interface{}{ + "key.name": []map[string]interface{}{ + {"subkey": 1}, + }, + "key": 3, + }, + }, + }, + Key: "mapname.0.key.name.0.subkey", + Value: 3, + }, + */ } for i, tc := range cases { From ee9114bcc482e9876f033c24f768a643a564c5b1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:36:45 +0100 Subject: [PATCH 0337/1238] provider/aws: Fix bug with Updating `aws_autoscaling_group` (#7698) `enabled_metrics` Fixes #7693 The metrics_granularity parameter was not being passed to the `EnableMetricsCollection` when we were calling it from the Update func. this was causing the API call to silently fail and not update the metrics for collection - unfortunately the enabled_metrics were still being added to the state :( By passing the granularity, we now get the correct metrics for collection ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAutoScalingGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAutoScalingGroup_ -timeout 120m === RUN TestAccAWSAutoScalingGroup_importBasic --- PASS: TestAccAWSAutoScalingGroup_importBasic (166.86s) === RUN TestAccAWSAutoScalingGroup_basic --- PASS: TestAccAWSAutoScalingGroup_basic (240.23s) === RUN TestAccAWSAutoScalingGroup_autoGeneratedName --- PASS: TestAccAWSAutoScalingGroup_autoGeneratedName (50.29s) === RUN TestAccAWSAutoScalingGroup_terminationPolicies --- PASS: TestAccAWSAutoScalingGroup_terminationPolicies (79.93s) === RUN TestAccAWSAutoScalingGroup_tags --- PASS: TestAccAWSAutoScalingGroup_tags (270.79s) === RUN TestAccAWSAutoScalingGroup_VpcUpdates --- PASS: TestAccAWSAutoScalingGroup_VpcUpdates (77.76s) === RUN TestAccAWSAutoScalingGroup_WithLoadBalancer --- PASS: TestAccAWSAutoScalingGroup_WithLoadBalancer (400.67s) === RUN TestAccAWSAutoScalingGroup_withPlacementGroup --- PASS: TestAccAWSAutoScalingGroup_withPlacementGroup (134.39s) === RUN TestAccAWSAutoScalingGroup_enablingMetrics --- PASS: TestAccAWSAutoScalingGroup_enablingMetrics (305.32s) === RUN TestAccAWSAutoScalingGroup_withMetrics --- PASS: TestAccAWSAutoScalingGroup_withMetrics (48.56s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1774.819s ``` --- .../aws/resource_aws_autoscaling_group.go | 1 + .../resource_aws_autoscaling_group_test.go | 30 +++++++++++++++++++ 2 files changed, 31 insertions(+) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index 9d57d5395..98c77a81c 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -612,6 +612,7 @@ func updateASGMetricsCollection(d *schema.ResourceData, conn *autoscaling.AutoSc props := &autoscaling.EnableMetricsCollectionInput{ AutoScalingGroupName: aws.String(d.Id()), Metrics: expandStringList(enabledMetrics.List()), + Granularity: aws.String(d.Get("metrics_granularity").(string)), } _, err := conn.EnableMetricsCollection(props) diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go index 9cc714a07..bd737872c 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go @@ -259,6 
+259,36 @@ func TestAccAWSAutoScalingGroup_withPlacementGroup(t *testing.T) { }) } +func TestAccAWSAutoScalingGroup_enablingMetrics(t *testing.T) { + var group autoscaling.Group + randName := fmt.Sprintf("terraform-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAutoScalingGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAutoScalingGroupConfig(randName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), + resource.TestCheckResourceAttr( + "aws_autoscaling_group.bar", "enabled_metrics.#", ""), + ), + }, + + resource.TestStep{ + Config: testAccAWSAutoscalingMetricsCollectionConfig_updatingMetricsCollected, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), + resource.TestCheckResourceAttr( + "aws_autoscaling_group.bar", "enabled_metrics.#", "5"), + ), + }, + }, + }) +} + func TestAccAWSAutoScalingGroup_withMetrics(t *testing.T) { var group autoscaling.Group From 2de5d31819de66b63b6f9a06587b1e9e5edf9970 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:37:19 +0100 Subject: [PATCH 0338/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c34f5ecf7..383927abd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -200,6 +200,7 @@ BUG FIXES: * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk [GH-6491] * provider/aws: Bump rds_cluster timeout to 15 mins [GH-7604] * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured [GH-7669] + * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` [GH-7698] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 5cd1b6624a9a1f64c563ef5adc9300ac866c03e2 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:37:26 +0100 Subject: [PATCH 0339/1238] provider/aws: Support Tags on `aws_rds_cluster` (#7695) Fixes #7692 ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRDSCluster_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRDSCluster_ -timeout 120m === RUN TestAccAWSRDSCluster_basic --- PASS: TestAccAWSRDSCluster_basic (160.77s) === RUN TestAccAWSRDSCluster_updateTags --- PASS: TestAccAWSRDSCluster_updateTags (329.20s) === RUN TestAccAWSRDSCluster_encrypted --- PASS: TestAccAWSRDSCluster_encrypted (227.29s) === RUN TestAccAWSRDSCluster_backupsUpdate --- PASS: TestAccAWSRDSCluster_backupsUpdate (196.92s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 914.199s ``` --- .../providers/aws/resource_aws_rds_cluster.go | 49 +++++++++++++++++-- .../aws/resource_aws_rds_cluster_test.go | 48 ++++++++++++++++++ 2 files changed, 94 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 481ef11dd..6cb76a7bb 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -192,18 +192,22 @@ func resourceAwsRDSCluster() *schema.Resource { return }, }, + + "tags": tagsSchema(), }, } } func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn + tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) if _, ok := d.GetOk("snapshot_identifier"); ok { opts := rds.RestoreDBClusterFromSnapshotInput{ DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), SnapshotIdentifier: aws.String(d.Get("snapshot_identifier").(string)), Engine: aws.String("aurora"), + Tags: tags, } if attr := d.Get("availability_zones").(*schema.Set); attr.Len() > 0 { @@ -282,6 +286,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error MasterUserPassword: aws.String(d.Get("master_password").(string)), MasterUsername: aws.String(d.Get("master_username").(string)), StorageEncrypted: aws.Bool(d.Get("storage_encrypted").(bool)), + Tags: tags, } if v := d.Get("database_name"); v.(string) != "" { @@ -430,11 +435,22 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("[DEBUG] Error saving RDS Cluster Members to state for RDS Cluster (%s): %s", d.Id(), err) } + // Fetch and save tags + arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) + if err != nil { + log.Printf("[DEBUG] Error building ARN for RDS Cluster (%s), not setting Tags", *dbc.DBClusterIdentifier) + } else { + if err := saveTagsRDS(conn, d, arn); err != nil { + log.Printf("[WARN] Failed to save tags for RDS Cluster (%s): %s", *dbc.DBClusterIdentifier, err) + } + } + return nil } func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn + requestUpdate := false req := &rds.ModifyDBClusterInput{ ApplyImmediately: aws.Bool(d.Get("apply_immediately").(bool)), @@ -443,6 +459,7 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error if d.HasChange("master_password") { req.MasterUserPassword = aws.String(d.Get("master_password").(string)) + requestUpdate = true } if d.HasChange("vpc_security_group_ids") { @@ -451,33 +468,49 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error } else { req.VpcSecurityGroupIds = []*string{} } + requestUpdate = true } if d.HasChange("preferred_backup_window") { req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string)) + requestUpdate = true } if d.HasChange("preferred_maintenance_window") { 
req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string)) + requestUpdate = true } if d.HasChange("backup_retention_period") { req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int))) + requestUpdate = true } if d.HasChange("parameter_group_name") { d.SetPartial("parameter_group_name") req.DBClusterParameterGroupName = aws.String(d.Get("parameter_group_name").(string)) + requestUpdate = true } if d.HasChange("db_cluster_parameter_group_name") { d.SetPartial("db_cluster_parameter_group_name") req.DBClusterParameterGroupName = aws.String(d.Get("db_cluster_parameter_group_name").(string)) + requestUpdate = true } - _, err := conn.ModifyDBCluster(req) - if err != nil { - return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) + if requestUpdate { + _, err := conn.ModifyDBCluster(req) + if err != nil { + return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err) + } + } + + if arn, err := buildRDSClusterARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { + if err := setTagsRDS(conn, d, arn); err != nil { + return err + } else { + d.SetPartial("tags") + } } return resourceAwsRDSClusterRead(d, meta) @@ -560,3 +593,13 @@ func resourceAwsRDSClusterStateRefreshFunc( return dbc, *dbc.Status, nil } } + +func buildRDSClusterARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") + } + + arn := fmt.Sprintf("arn:aws:rds:%s:%s:cluster:%s", region, accountid, identifier) + return arn, nil + +} diff --git a/builtin/providers/aws/resource_aws_rds_cluster_test.go b/builtin/providers/aws/resource_aws_rds_cluster_test.go index ec59f44fe..b3eb1c790 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_test.go @@ -35,6 +35,35 @@ func TestAccAWSRDSCluster_basic(t *testing.T) { }) } +func TestAccAWSRDSCluster_updateTags(t *testing.T) { + var v rds.DBCluster + ri := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterConfig(ri), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), + resource.TestCheckResourceAttr( + "aws_rds_cluster.default", "tags.%", "1"), + ), + }, + resource.TestStep{ + Config: testAccAWSClusterConfigUpdatedTags(ri), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterExists("aws_rds_cluster.default", &v), + resource.TestCheckResourceAttr( + "aws_rds_cluster.default", "tags.%", "2"), + ), + }, + }, + }) +} + func TestAccAWSRDSCluster_encrypted(t *testing.T) { var v rds.DBCluster @@ -169,6 +198,25 @@ resource "aws_rds_cluster" "default" { master_username = "foo" master_password = "mustbeeightcharaters" db_cluster_parameter_group_name = "default.aurora5.6" + tags { + Environment = "production" + } +}`, n) +} + +func testAccAWSClusterConfigUpdatedTags(n int) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "default" { + cluster_identifier = "tf-aurora-cluster-%d" + availability_zones = ["us-west-2a","us-west-2b","us-west-2c"] + database_name = "mydb" + master_username = "foo" + master_password = "mustbeeightcharaters" + db_cluster_parameter_group_name = "default.aurora5.6" + tags { + Environment = 
"production" + AnotherTag = "test" + } }`, n) } From 4ab654a6f6073332cd64c30b98a5c73154ced80c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:37:54 +0100 Subject: [PATCH 0340/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 383927abd..5622c172d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -112,6 +112,7 @@ IMPROVEMENTS: * provider/aws: Allow `port` on `aws_db_instance` to be updated [GH-7441] * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs [GH-7470] * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition [GH-7653] + * provider/aws: Support Tags on `aws_rds_cluster` [GH-7695] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 7879450cf3024f42627f16ef1114342e54427f02 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:38:44 +0100 Subject: [PATCH 0341/1238] provider/aws: Fix the import of `aws_redshift_cluster` breaking (#7677) `skip_final_snapshot` ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftCluster_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRedshiftCluster_importBasic -timeout 120m === RUN TestAccAWSRedshiftCluster_importBasic --- PASS: TestAccAWSRedshiftCluster_importBasic (641.87s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 641.888s ``` --- .../aws/resource_aws_redshift_cluster.go | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go index 7f03eb5b9..68651c28b 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster.go @@ -21,7 +21,7 @@ func resourceAwsRedshiftCluster() *schema.Resource { Update: resourceAwsRedshiftClusterUpdate, Delete: resourceAwsRedshiftClusterDelete, Importer: &schema.ResourceImporter{ - State: schema.ImportStatePassthrough, + State: resourceAwsRedshiftClusterImport, }, Schema: map[string]*schema.Schema{ @@ -212,6 +212,15 @@ func resourceAwsRedshiftCluster() *schema.Resource { } } +func resourceAwsRedshiftClusterImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).redshiftconn @@ -557,11 +566,8 @@ func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) ClusterIdentifier: aws.String(d.Id()), } - skipFinalSnapshot, exists := d.GetOk("skip_final_snapshot") - if !exists { - skipFinalSnapshot = true - } - deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot.(bool)) + skipFinalSnapshot := d.Get("skip_final_snapshot").(bool) + deleteOpts.SkipFinalClusterSnapshot = aws.Bool(skipFinalSnapshot) if skipFinalSnapshot == false { if name, 
present := d.GetOk("final_snapshot_identifier"); present { From b4fa54e3c0590780851e6f73ef658432b1daddbf Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 19:53:37 +0100 Subject: [PATCH 0342/1238] provider/aws: Support Import `aws_rds_cluster` (#7366) ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRDSCluster_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRDSCluster_importBasic -timeout 120m === RUN TestAccAWSRDSCluster_importBasic --- FAIL: TestAccAWSRDSCluster_importBasic (122.71s) testing.go:255: Step 1 error: ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected. (map[string]string) { } (map[string]string) (len=1) { (string) (len=19) "skip_final_snapshot": (string) (len=4) "true" } FAIL exit status 1 FAIL github.com/hashicorp/terraform/builtin/providers/aws 122.733s make: *** [testacc] Error 1 ``` --- ....go => import_aws_db_subnet_group_test.go} | 0 .../aws/import_aws_rds_cluster_test.go | 32 +++++++++++++++++++ .../providers/aws/resource_aws_rds_cluster.go | 13 ++++++++ 3 files changed, 45 insertions(+) rename builtin/providers/aws/{import_aws_db_subnet_group_group_test.go => import_aws_db_subnet_group_test.go} (100%) create mode 100644 builtin/providers/aws/import_aws_rds_cluster_test.go diff --git a/builtin/providers/aws/import_aws_db_subnet_group_group_test.go b/builtin/providers/aws/import_aws_db_subnet_group_test.go similarity index 100% rename from builtin/providers/aws/import_aws_db_subnet_group_group_test.go rename to builtin/providers/aws/import_aws_db_subnet_group_test.go diff --git a/builtin/providers/aws/import_aws_rds_cluster_test.go b/builtin/providers/aws/import_aws_rds_cluster_test.go new file mode 100644 index 000000000..be7e8bea3 --- /dev/null +++ b/builtin/providers/aws/import_aws_rds_cluster_test.go @@ -0,0 +1,32 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSRDSCluster_importBasic(t *testing.T) { + resourceName := "aws_rds_cluster.default" + ri := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterConfig(ri), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "master_password", "skip_final_snapshot"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 6cb76a7bb..60981c1ae 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -20,6 +20,9 @@ func resourceAwsRDSCluster() *schema.Resource { Read: resourceAwsRDSClusterRead, Update: resourceAwsRDSClusterUpdate, Delete: resourceAwsRDSClusterDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRdsClusterImport, + }, Schema: map[string]*schema.Schema{ @@ -198,6 +201,15 @@ func resourceAwsRDSCluster() *schema.Resource { } } +func resourceAwsRdsClusterImport( + d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + // Neither skip_final_snapshot nor final_snapshot_identifier can be fetched + // from any API 
call, so we need to default skip_final_snapshot to true so + // that final_snapshot_identifier is not required + d.Set("skip_final_snapshot", true) + return []*schema.ResourceData{d}, nil +} + func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).rdsconn tags := tagsFromMapRDS(d.Get("tags").(map[string]interface{})) @@ -407,6 +419,7 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("database_name", dbc.DatabaseName) } + d.Set("cluster_identifier", dbc.DBClusterIdentifier) d.Set("db_subnet_group_name", dbc.DBSubnetGroup) d.Set("parameter_group_name", dbc.DBClusterParameterGroup) d.Set("db_cluster_parameter_group_name", dbc.DBClusterParameterGroup) From c9138daacced47f117cc337e228a05aed5c27d44 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Wed, 20 Jul 2016 20:02:40 +0100 Subject: [PATCH 0343/1238] provider/azurerm: dump entire Request/Response in autorest Decorator (#7719) --- builtin/providers/azurerm/config.go | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index d6b9a2706..50e9c89f4 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "net/http" + "net/http/httputil" "github.com/Azure/azure-sdk-for-go/arm/cdn" "github.com/Azure/azure-sdk-for-go/arm/compute" @@ -65,10 +66,23 @@ type ArmClient struct { func withRequestLogging() autorest.SendDecorator { return func(s autorest.Sender) autorest.Sender { return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { - log.Printf("[DEBUG] Sending Azure RM Request %q to %q\n", r.Method, r.URL) + // dump request to wire format + if dump, err := httputil.DumpRequestOut(r, true); err == nil { + log.Printf("[DEBUG] AzureRM Request: \n%s\n", dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] AzureRM Request: %s to %s\n", r.Method, r.URL) + } + resp, err := s.Do(r) if resp != nil { - log.Printf("[DEBUG] Received Azure RM Request status code %s for %s\n", resp.Status, r.URL) + // dump response to wire format + if dump, err := httputil.DumpResponse(resp, true); err == nil { + log.Printf("[DEBUG] AzureRM Response for %s: \n%s\n", r.URL, dump) + } else { + // fallback to basic message + log.Printf("[DEBUG] AzureRM Response: %s for %s\n", resp.Status, r.URL) + } } else { log.Printf("[DEBUG] Request to %s completed with no response", r.URL) } From 7ad4220ea93cb1eee54964523c46f47bb4c5f32a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 20:03:46 +0100 Subject: [PATCH 0344/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5622c172d..fa56128f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -117,6 +117,7 @@ IMPROVEMENTS: * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` [GH-7434] + * provider/azurerm: dump entire Request/Response in autorest Decorator [GH-7719] * provider/clc: Add support for hyperscale and bareMetal server types and package installation * provider/clc: Fix optional server password [GH-6414] * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` [GH-6898] From af4cc20ec0e336ea543f848b4c42dfb4c4ad2a6c Mon Sep 17 00:00:00 2001 
From: Paul Stack Date: Wed, 20 Jul 2016 20:03:54 +0100 Subject: [PATCH 0345/1238] provider/azurerm: `azurerm_virtual_machine` computer_name now Required (#7308) Fixes #7299 where it was found that computer_name is not optional (as the msdn documentation states) ``` make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMVirtualMachine_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMVirtualMachine_ -timeout 120m === RUN TestAccAzureRMVirtualMachine_basicLinuxMachine --- PASS: TestAccAzureRMVirtualMachine_basicLinuxMachine (403.53s) === RUN TestAccAzureRMVirtualMachine_tags --- PASS: TestAccAzureRMVirtualMachine_tags (488.46s) === RUN TestAccAzureRMVirtualMachine_updateMachineSize --- PASS: TestAccAzureRMVirtualMachine_updateMachineSize (601.82s) === RUN TestAccAzureRMVirtualMachine_basicWindowsMachine --- PASS: TestAccAzureRMVirtualMachine_basicWindowsMachine (646.75s) === RUN TestAccAzureRMVirtualMachine_windowsUnattendedConfig --- PASS: TestAccAzureRMVirtualMachine_windowsUnattendedConfig (891.42s) === RUN TestAccAzureRMVirtualMachine_winRMConfig --- PASS: TestAccAzureRMVirtualMachine_winRMConfig (768.73s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 3800.734s ``` --- builtin/providers/azurerm/resource_arm_virtual_machine.go | 7 +++---- .../docs/providers/azurerm/r/virtual_machine.html.markdown | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index e2b26584d..b2121df8a 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -214,8 +214,8 @@ func resourceArmVirtualMachine() *schema.Resource { Schema: map[string]*schema.Schema{ "computer_name": { Type: schema.TypeString, - Optional: true, ForceNew: true, + Required: true, }, "admin_username": { @@ -847,9 +847,11 @@ func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSPr adminUsername := osProfile["admin_username"].(string) adminPassword := osProfile["admin_password"].(string) + computerName := osProfile["computer_name"].(string) profile := &compute.OSProfile{ AdminUsername: &adminUsername, + ComputerName: &computerName, } if adminPassword != "" { @@ -883,9 +885,6 @@ func expandAzureRmVirtualMachineOsProfile(d *schema.ResourceData) (*compute.OSPr } } - if v := osProfile["computer_name"].(string); v != "" { - profile.ComputerName = &v - } if v := osProfile["custom_data"].(string); v != "" { profile.CustomData = &v } diff --git a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown index 2938c0a13..7f719337c 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown @@ -253,7 +253,7 @@ For more information on the different example configurations, please check out t `os_profile` supports the following: -* `computer_name` - (Optional) Specifies the name of the virtual machine. +* `computer_name` - (Required) Specifies the name of the virtual machine. * `admin_username` - (Required) Specifies the name of the administrator account. * `admin_password` - (Required) Specifies the password of the administrator account. 
* `custom_data` - (Optional) Specifies a base-64 encoded string of custom data. The base-64 encoded string is decoded to a binary array that is saved as a file on the Virtual Machine. The maximum length of the binary array is 65535 bytes. From 2b6cd48ff0a6e64ff3c760fbf6752803ce6195b9 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 20:04:52 +0100 Subject: [PATCH 0346/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa56128f2..2d943e9c7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`. * `azurerm_dns_cname_record` now accepts a single record rather than a list of records + * `azurerm_virtual_machine` computer_name now Required * `aws_db_instance` now defaults `publicly_accessible` to false * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. * `atlas_artifact` resource has be depracated. Please use the new `atlas_artifact` Data Source @@ -217,6 +218,7 @@ BUG FIXES: * provider/azurerm: catch `azurerm_template_deployment` erroring silently [GH-7644] * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource [GH-7646] * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names [GH-7674] + * provider/azurerm: `azurerm_virtual_machine` computer_name now Required [GH-7308] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] From 3ccb23f0e39f23b827ec34bd4c737758bcc3aa56 Mon Sep 17 00:00:00 2001 From: Golo Roden Date: Wed, 20 Jul 2016 21:05:12 +0200 Subject: [PATCH 0347/1238] Explain domain and name parameters. --- website/source/docs/providers/dnsimple/r/record.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/dnsimple/r/record.html.markdown b/website/source/docs/providers/dnsimple/r/record.html.markdown index 6c167df6c..25df83a78 100644 --- a/website/source/docs/providers/dnsimple/r/record.html.markdown +++ b/website/source/docs/providers/dnsimple/r/record.html.markdown @@ -33,6 +33,8 @@ The following arguments are supported: * `type` - (Required) The type of the record * `ttl` - (Optional) The TTL of the record +Please note that the `domain` must not include the sub-domain. Instead, use the `name` argument for the sub-domain. 
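To make the distinction concrete, a record for `www.example.com` would be declared roughly as follows — a minimal sketch with placeholder values, not part of this change:

```
resource "dnsimple_record" "www" {
  domain = "example.com" # the registered domain only
  name   = "www"         # the sub-domain portion goes here
  value  = "192.0.2.10"
  type   = "A"
  ttl    = 3600
}
```

The exported `hostname` attribute then reports the resulting FQDN, `www.example.com`.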
+ ## Attributes Reference The following attributes are exported: @@ -45,4 +47,3 @@ The following attributes are exported: * `priority` - The priority of the record * `domain_id` - The domain ID of the record * `hostname` - The FQDN of the record - From 4d126aaf6f905b5b8754542d50a12f9d823620b1 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 20 Jul 2016 15:47:10 -0500 Subject: [PATCH 0348/1238] provider/aws: Fix regression in Security Group Rules with self reference (#7706) * provider/aws: Failing test for #7670 * provider/aws: Fix security group rule regression with self (#7670) --- .../aws/import_aws_security_group.go | 28 ++++++++++++ .../aws/resource_aws_security_group_rule.go | 13 ------ .../resource_aws_security_group_rule_test.go | 43 +++++++++++++++++++ 3 files changed, 71 insertions(+), 13 deletions(-) diff --git a/builtin/providers/aws/import_aws_security_group.go b/builtin/providers/aws/import_aws_security_group.go index 88cbb12a6..d710169dd 100644 --- a/builtin/providers/aws/import_aws_security_group.go +++ b/builtin/providers/aws/import_aws_security_group.go @@ -49,6 +49,34 @@ func resourceAwsSecurityGroupImportState( d.SetType("aws_security_group_rule") d.Set("security_group_id", sgId) d.Set("type", ruleType) + + // 'self' is false by default. Below, we range over the group ids and set true + // if the parent sg id is found + d.Set("self", false) + + if len(perm.UserIdGroupPairs) > 0 { + s := perm.UserIdGroupPairs[0] + + // Check for Pair that is the same as the Security Group, to denote self. + // Otherwise, mark the group id in source_security_group_id + isVPC := sg.VpcId != nil && *sg.VpcId != "" + if isVPC { + if *s.GroupId == *sg.GroupId { + d.Set("self", true) + // prune the self reference from the UserIdGroupPairs, so we don't + // have duplicate sg ids (both self and in source_security_group_id) + perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) + } + } else { + if *s.GroupName == *sg.GroupName { + d.Set("self", true) + // prune the self reference from the UserIdGroupPairs, so we don't + // have duplicate sg ids (both self and in source_security_group_id) + perm.UserIdGroupPairs = append(perm.UserIdGroupPairs[:0], perm.UserIdGroupPairs[0+1:]...) + } + } + } + // XXX If the rule contained more than one source security group, this // will choose one of them. We actually need to create one rule for each // source security group. diff --git a/builtin/providers/aws/resource_aws_security_group_rule.go b/builtin/providers/aws/resource_aws_security_group_rule.go index d170b3d20..ef34e9122 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule.go +++ b/builtin/providers/aws/resource_aws_security_group_rule.go @@ -498,7 +498,6 @@ func expandIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup) (*ec2.IpPermiss } if v, ok := d.GetOk("self"); ok && v.(bool) { - // if sg.GroupId != nil { if sg.VpcId != nil && *sg.VpcId != "" { groups[*sg.GroupId] = true } else { @@ -574,10 +573,6 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe d.Set("cidr_blocks", cb) - // 'self' is false by default. 
Below, we range over the group ids and set true - // if the parent sg id is found - d.Set("self", false) - var pl []string for _, p := range rule.PrefixListIds { pl = append(pl, *p.PrefixListId) @@ -587,17 +582,9 @@ func setFromIPPerm(d *schema.ResourceData, sg *ec2.SecurityGroup, rule *ec2.IpPe if len(rule.UserIdGroupPairs) > 0 { s := rule.UserIdGroupPairs[0] - // Check for Pair that is the same as the Security Group, to denote self. - // Otherwise, mark the group id in source_security_group_id if isVPC { - if *s.GroupId == *sg.GroupId { - d.Set("self", true) - } d.Set("source_security_group_id", *s.GroupId) } else { - if *s.GroupName == *sg.GroupName { - d.Set("self", true) - } d.Set("source_security_group_id", *s.GroupName) } } diff --git a/builtin/providers/aws/resource_aws_security_group_rule_test.go b/builtin/providers/aws/resource_aws_security_group_rule_test.go index 4dbaec474..045a0731e 100644 --- a/builtin/providers/aws/resource_aws_security_group_rule_test.go +++ b/builtin/providers/aws/resource_aws_security_group_rule_test.go @@ -416,6 +416,24 @@ func TestAccAWSSecurityGroupRule_Race(t *testing.T) { }) } +func TestAccAWSSecurityGroupRule_SelfSource(t *testing.T) { + var group ec2.SecurityGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupRuleSelfInSource, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupRuleExists("aws_security_group.web", &group), + ), + }, + }, + }) +} + func TestAccAWSSecurityGroupRule_PrefixListEgress(t *testing.T) { var group ec2.SecurityGroup var endpoint ec2.VpcEndpoint @@ -1001,3 +1019,28 @@ resource "aws_security_group_rule" "egress_1" { security_group_id = "${aws_security_group.egress.id}" } ` + +const testAccAWSSecurityGroupRuleSelfInSource = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" + + tags { + Name = "tf_sg_rule_self_group" + } +} + +resource "aws_security_group" "web" { + name = "allow_all" + description = "Allow all inbound traffic" + vpc_id = "${aws_vpc.foo.id}" +} + +resource "aws_security_group_rule" "allow_self" { + type = "ingress" + from_port = 0 + to_port = 0 + protocol = "-1" + security_group_id = "${aws_security_group.web.id}" + source_security_group_id = "${aws_security_group.web.id}" +} +` From 6ef7f9cd4d641419ad89ec101753ab88fed18cad Mon Sep 17 00:00:00 2001 From: Ross Delinger Date: Tue, 12 Jul 2016 14:18:37 -0700 Subject: [PATCH 0349/1238] Add import support to CloudFront Distributions. * Import support and acceptance tests for import support have been added. 
* geo_restriction.location is now guarnteed to be in sorted order (was causing a failure in the test) --- ...nt_distribution_configuration_structure.go | 11 +++++++ .../aws/import_aws_cloudfront_distribution.go | 28 +++++++++++++++++ ...import_aws_cloudfront_distribution_test.go | 30 +++++++++++++++++++ .../resource_aws_cloudfront_distribution.go | 3 ++ 4 files changed, 72 insertions(+) create mode 100644 builtin/providers/aws/import_aws_cloudfront_distribution.go create mode 100644 builtin/providers/aws/import_aws_cloudfront_distribution_test.go diff --git a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go b/builtin/providers/aws/cloudfront_distribution_configuration_structure.go index e6c2d998e..6d0896c01 100644 --- a/builtin/providers/aws/cloudfront_distribution_configuration_structure.go +++ b/builtin/providers/aws/cloudfront_distribution_configuration_structure.go @@ -11,6 +11,7 @@ import ( "bytes" "fmt" "reflect" + "sort" "strconv" "time" @@ -25,6 +26,14 @@ import ( // is used to set the zone_id attribute. const cloudFrontRoute53ZoneID = "Z2FDTNDATAQYW2" +// Define Sort interface for []*string so we can ensure the order of +// geo_restrictions.locations +type StringPtrSlice []*string + +func (p StringPtrSlice) Len() int { return len(p) } +func (p StringPtrSlice) Less(i, j int) bool { return *p[i] < *p[j] } +func (p StringPtrSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + // Assemble the *cloudfront.DistributionConfig variable. Calls out to various // expander functions to convert attributes and sub-attributes to the various // complex structures which are necessary to properly build the @@ -873,6 +882,7 @@ func expandGeoRestriction(m map[string]interface{}) *cloudfront.GeoRestriction { if v, ok := m["locations"]; ok { gr.Quantity = aws.Int64(int64(len(v.([]interface{})))) gr.Items = expandStringList(v.([]interface{})) + sort.Sort(StringPtrSlice(gr.Items)) } else { gr.Quantity = aws.Int64(0) } @@ -884,6 +894,7 @@ func flattenGeoRestriction(gr *cloudfront.GeoRestriction) map[string]interface{} m["restriction_type"] = *gr.RestrictionType if gr.Items != nil { + sort.Sort(StringPtrSlice(gr.Items)) m["locations"] = flattenStringList(gr.Items) } return m diff --git a/builtin/providers/aws/import_aws_cloudfront_distribution.go b/builtin/providers/aws/import_aws_cloudfront_distribution.go new file mode 100644 index 000000000..dcb8792a3 --- /dev/null +++ b/builtin/providers/aws/import_aws_cloudfront_distribution.go @@ -0,0 +1,28 @@ +package aws + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/cloudfront" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsCloudFrontDistributionImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + conn := meta.(*AWSClient).cloudfrontconn + id := d.Id() + resp, err := conn.GetDistributionConfig(&cloudfront.GetDistributionConfigInput{ + Id: aws.String(id), + }) + + if err != nil { + return nil, err + } + + distConfig := resp.DistributionConfig + results := make([]*schema.ResourceData, 1) + err = flattenDistributionConfig(d, distConfig) + if err != nil { + return nil, err + } + results[0] = d + return results, nil +} diff --git a/builtin/providers/aws/import_aws_cloudfront_distribution_test.go b/builtin/providers/aws/import_aws_cloudfront_distribution_test.go new file mode 100644 index 000000000..9c2bbed63 --- /dev/null +++ b/builtin/providers/aws/import_aws_cloudfront_distribution_test.go @@ -0,0 +1,30 @@ +package aws + +import ( + "testing" + + 
"github.com/hashicorp/terraform/helper/resource" +) + +func TestAccCloudFrontDistribution_importBasic(t *testing.T) { + resourceName := "aws_cloudfront_distribution.s3_distribution" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudFrontDistributionDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSCloudFrontDistributionS3Config, + }, + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + // Ignore retain_on_delete since it doesn't come from the AWS + // API. + ImportStateVerifyIgnore: []string{"retain_on_delete"}, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_cloudfront_distribution.go b/builtin/providers/aws/resource_aws_cloudfront_distribution.go index 9fc333279..4d6af9d94 100644 --- a/builtin/providers/aws/resource_aws_cloudfront_distribution.go +++ b/builtin/providers/aws/resource_aws_cloudfront_distribution.go @@ -16,6 +16,9 @@ func resourceAwsCloudFrontDistribution() *schema.Resource { Read: resourceAwsCloudFrontDistributionRead, Update: resourceAwsCloudFrontDistributionUpdate, Delete: resourceAwsCloudFrontDistributionDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsCloudFrontDistributionImport, + }, Schema: map[string]*schema.Schema{ "aliases": &schema.Schema{ From 5f6ea8b18ee18be7c8fdf31949a860c51f9958dd Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 23:28:59 +0100 Subject: [PATCH 0350/1238] documentation/aws: More additions of Import documention to the AWS (#7729) resources --- .../providers/aws/r/api_gateway_account.html.markdown | 9 +++++++++ .../providers/aws/r/api_gateway_api_key.html.markdown | 9 +++++++++ .../providers/aws/r/autoscaling_group.html.markdown | 9 +++++++++ .../r/cloudfront_origin_access_identity.html.markdown | 9 +++++++++ .../docs/providers/aws/r/cloudtrail.html.markdown | 9 +++++++++ .../aws/r/cloudwatch_event_rule.html.markdown | 9 +++++++++ .../providers/aws/r/cloudwatch_log_group.html.markdown | 9 +++++++++ .../aws/r/cloudwatch_metric_alarm.html.markdown | 9 +++++++++ .../providers/aws/r/customer_gateway.html.markdown | 9 +++++++++ .../docs/providers/aws/r/db_instance.html.markdown | 8 ++++++++ .../docs/providers/aws/r/db_option_group.html.markdown | 8 ++++++++ .../providers/aws/r/db_parameter_group.html.markdown | 8 ++++++++ .../docs/providers/aws/r/db_subnet_group.html.markdown | 8 ++++++++ website/source/docs/providers/aws/r/ebs_volume.html.md | 7 +++++++ .../docs/providers/aws/r/ecr_repository.html.markdown | 9 +++++++++ .../docs/providers/aws/r/efs_file_system.html.markdown | 9 +++++++++ .../providers/aws/r/efs_mount_target.html.markdown | 9 +++++++++ .../aws/r/elastic_beanstalk_application.html.markdown | 9 +++++++++ .../aws/r/elastic_beanstalk_environment.html.markdown | 9 +++++++++ .../aws/r/elasticache_parameter_group.html.markdown | 9 +++++++++ .../aws/r/elasticache_subnet_group.html.markdown | 8 ++++++++ website/source/docs/providers/aws/r/elb.html.markdown | 8 ++++++++ .../source/docs/providers/aws/r/flow_log.html.markdown | 8 ++++++++ .../docs/providers/aws/r/glacier_vault.html.markdown | 8 ++++++++ .../aws/r/iam_account_password_policy.html.markdown | 9 +++++++++ .../docs/providers/aws/r/iam_group.html.markdown | 8 ++++++++ .../providers/aws/r/iam_saml_provider.html.markdown | 8 ++++++++ .../source/docs/providers/aws/r/iam_user.html.markdown | 9 +++++++++ .../source/docs/providers/aws/r/instance.html.markdown | 9 
+++++++++ .../providers/aws/r/internet_gateway.html.markdown | 8 ++++++++ .../source/docs/providers/aws/r/key_pair.html.markdown | 8 ++++++++ .../source/docs/providers/aws/r/kms_key.html.markdown | 8 ++++++++ .../docs/providers/aws/r/lambda_function.html.markdown | 8 ++++++++ .../providers/aws/r/launch_configuration.html.markdown | 8 ++++++++ .../docs/providers/aws/r/nat_gateway.html.markdown | 8 ++++++++ .../docs/providers/aws/r/network_acl.html.markdown | 8 ++++++++ .../docs/providers/aws/r/network_interface.markdown | 9 +++++++++ .../docs/providers/aws/r/placement_group.html.markdown | 8 ++++++++ .../docs/providers/aws/r/rds_cluster.html.markdown | 8 ++++++++ .../providers/aws/r/rds_cluster_instance.html.markdown | 8 ++++++++ .../aws/r/rds_cluster_parameter_group.markdown | 8 ++++++++ .../providers/aws/r/redshift_cluster.html.markdown | 8 ++++++++ .../aws/r/redshift_parameter_group.html.markdown | 8 ++++++++ .../aws/r/redshift_subnet_group.html.markdown | 7 +++++++ .../aws/r/route53_delegation_set.html.markdown | 10 ++++++++++ .../providers/aws/r/route53_health_check.html.markdown | 8 ++++++++ .../docs/providers/aws/r/route53_zone.html.markdown | 9 +++++++++ .../docs/providers/aws/r/route_table.html.markdown | 8 ++++++++ .../docs/providers/aws/r/security_group.html.markdown | 9 +++++++++ .../docs/providers/aws/r/simpledb_domain.html.markdown | 2 +- .../docs/providers/aws/r/sns_topic.html.markdown | 2 +- .../aws/r/sns_topic_subscription.html.markdown | 2 +- .../docs/providers/aws/r/sqs_queue.html.markdown | 2 +- .../source/docs/providers/aws/r/subnet.html.markdown | 2 +- website/source/docs/providers/aws/r/vpc.html.markdown | 2 +- .../providers/aws/r/vpc_dhcp_options.html.markdown | 2 +- .../docs/providers/aws/r/vpc_endpoint.html.markdown | 2 +- .../docs/providers/aws/r/vpc_peering.html.markdown | 2 +- .../docs/providers/aws/r/vpn_connection.html.markdown | 2 +- .../docs/providers/aws/r/vpn_gateway.html.markdown | 2 +- 60 files changed, 424 insertions(+), 11 deletions(-) diff --git a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown b/website/source/docs/providers/aws/r/api_gateway_account.html.markdown index 23afc8dfb..1ea2fe392 100644 --- a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_account.html.markdown @@ -82,3 +82,12 @@ The following attribute is exported: * `burst_limit` - The absolute maximum number of times API Gateway allows the API to be called per second (RPS). * `rate_limit` - The number of times API Gateway allows the API to be called per second on average (RPS). + + +## Import + +API Gateway Accounts can be imported using the word `api-gateway-account`, e.g. + +``` +$ terraform import aws_api_gateway_account.demo api-gateway-account +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown b/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown index becdb07f8..eeb8f14c7 100644 --- a/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_api_key.html.markdown @@ -51,3 +51,12 @@ The following arguments are supported: The following attributes are exported: * `id` - The ID of the API key + + +## Import + +API Gateway Keys can be imported using the `id`, e.g. 
+ +``` +$ terraform import aws_api_gateway_api_key.my_demo_key 8bklk8bl1k3sB38D9B3l0enyWT8c09B30lkq0blk +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown index e8599f44c..f3caab2d9 100644 --- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown +++ b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown @@ -178,3 +178,12 @@ If ASG creation takes more than a few minutes, this could indicate one of a number of configuration problems. See the [AWS Docs on Load Balancer Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html) for more information. + + +## Import + +AutoScaling Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_autoscaling_group.web web-asg +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown b/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown index ebc54775a..6e023a931 100644 --- a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown +++ b/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown @@ -101,3 +101,12 @@ aws_s3_bucket "bucket" { [2]: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html [3]: /docs/providers/aws/r/cloudfront_distribution.html [4]: /docs/providers/aws/r/s3_bucket.html + + +## Import + +Cloudfront Origin Access Identities can be imported using the `id`, e.g. + +``` +$ terraform import aws_cloudfront_origin_access_identity.origin_access E74FTE3AEXAMPLE +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/cloudtrail.html.markdown b/website/source/docs/providers/aws/r/cloudtrail.html.markdown index 2afe727bf..3d8d3d290 100644 --- a/website/source/docs/providers/aws/r/cloudtrail.html.markdown +++ b/website/source/docs/providers/aws/r/cloudtrail.html.markdown @@ -87,3 +87,12 @@ The following attributes are exported: * `id` - The name of the trail. * `home_region` - The region in which the trail was created. * `arn` - The Amazon Resource Name of the trail. + + +## Import + +Cloudtrails can be imported using the `name`, e.g. + +``` +$ terraform import aws_cloudtrail.sample my-sample-trail +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown index f3ed87cb7..a1bd581c9 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_event_rule.html.markdown @@ -55,3 +55,12 @@ The following arguments are supported: The following attributes are exported: * `arn` - The Amazon Resource Name (ARN) of the rule. + + +## Import + +Cloudwatch Event Rules can be imported using the `name`, e.g. 
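As a hedged sketch, a rule definition that the import command below could map onto; the rule name matches the example, while the description and event pattern are assumed values:

```
resource "aws_cloudwatch_event_rule" "console" {
  name        = "capture-console-sign-in"            # matches the import example
  description = "Capture AWS console sign-in events" # assumed description

  event_pattern = <<PATTERN
{
  "detail-type": ["AWS Console Sign In via CloudTrail"]
}
PATTERN
}
```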
+ +``` +$ terraform import aws_cloudwatch_event_rule.console capture-console-sign-in +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown index e784c6389..7293a0c17 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown @@ -31,3 +31,12 @@ The following arguments are supported: The following attributes are exported: * `arn` - The Amazon Resource Name (ARN) specifying the log group. + + +## Import + +Cloudwatch Log Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_cloudwatch_log_group.test_group yada +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown index 75ae6ceb1..d91b8863e 100644 --- a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown +++ b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown @@ -84,3 +84,12 @@ The following attributes are exported: * `id` - The ID of the health check + + +## Import + +Cloud Metric Alarms can be imported using the `alarm_name`, e.g. + +``` +$ terraform import aws_cloudwatch_metric_alarm.test alarm-12345 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/customer_gateway.html.markdown b/website/source/docs/providers/aws/r/customer_gateway.html.markdown index 5a83e6c43..0c40b4450 100644 --- a/website/source/docs/providers/aws/r/customer_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/customer_gateway.html.markdown @@ -44,3 +44,12 @@ The following attributes are exported: * `ip_address` - The IP address of the gateway's Internet-routable external interface. * `type` - The type of customer gateway. * `tags` - Tags applied to the gateway. + + +## Import + +Customer Gateways can be imported using the `id`, e.g. + +``` +$ terraform import aws_customer_gateway.main cgw-b4dc3961 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index 652d2158f..234a97985 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -141,3 +141,11 @@ On Oracle instances the following is exported additionally: [1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html [2]: https://docs.aws.amazon.com/fr_fr/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html + +## Import + +DB Instances can be imported using the `identifier`, e.g. + +``` +$ terraform import aws_db_instance.default mydb-rds-instance +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/db_option_group.html.markdown b/website/source/docs/providers/aws/r/db_option_group.html.markdown index 209663cec..2c6feaab5 100644 --- a/website/source/docs/providers/aws/r/db_option_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_option_group.html.markdown @@ -62,3 +62,11 @@ Option Settings blocks support the following: The following attributes are exported: * `arn` - The ARN of the db option group. + +## Import + +DB Option groups can be imported using the `name`, e.g. 
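A minimal configuration sketch to accompany the import below, assuming a MySQL 5.6 option group; the engine values and description are illustrative:

```
resource "aws_db_option_group" "bar" {
  name                     = "mysql-option-group" # matches the import example
  option_group_description = "MySQL option group" # assumed values below
  engine_name              = "mysql"
  major_engine_version     = "5.6"
}
```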
+ +``` +$ terraform import aws_db_option_group.bar mysql-option-group +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown index 4a60e79a3..88addff48 100644 --- a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown @@ -51,3 +51,11 @@ The following attributes are exported: * `id` - The db parameter group name. * `arn` - The ARN of the db parameter group. + +## Import + +DB Parameter groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_db_parameter_group.rds_pg rds-pg +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown index 608336d9c..087186845 100644 --- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown @@ -38,3 +38,11 @@ The following attributes are exported: * `id` - The db subnet group name. * `arn` - The ARN of the db subnet group. + +## Import + +DB Subnet groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_db_subnet_group.default production-subnet-group +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md index baf18c6ec..395d56666 100644 --- a/website/source/docs/providers/aws/r/ebs_volume.html.md +++ b/website/source/docs/providers/aws/r/ebs_volume.html.md @@ -44,3 +44,10 @@ The following attributes are exported: * `id` - The volume ID (e.g. vol-59fcb34e). +## Import + +EBS Volumes can be imported using the `id`, e.g. + +``` +$ terraform import aws_ebs_volume.data vol-049df61146c4d7901 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/ecr_repository.html.markdown b/website/source/docs/providers/aws/r/ecr_repository.html.markdown index 2ab3bfac3..f10bfc0f6 100644 --- a/website/source/docs/providers/aws/r/ecr_repository.html.markdown +++ b/website/source/docs/providers/aws/r/ecr_repository.html.markdown @@ -36,3 +36,12 @@ The following attributes are exported: * `name` - The name of the repository. * `registry_id` - The registry ID where the repository was created. * `repository_url` - The URL of the repository (in the form `https://aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName` + + +## Import + +ECR Repositories can be imported using the `name`, e.g. + +``` +$ terraform import aws_ecr_repository.service test-service +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/efs_file_system.html.markdown b/website/source/docs/providers/aws/r/efs_file_system.html.markdown index ac4cbb0b3..ac017144f 100644 --- a/website/source/docs/providers/aws/r/efs_file_system.html.markdown +++ b/website/source/docs/providers/aws/r/efs_file_system.html.markdown @@ -33,3 +33,12 @@ The following arguments are supported: The following attributes are exported: * `id` - The ID that identifies the file system + + +## Import + +EFS Filesystems can be imported using the `id`, e.g. 
+ +``` +$ terraform import aws_efs_file_system.foo fs-6fa144c6 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown index b54ff9730..3764926df 100644 --- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown +++ b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown @@ -48,3 +48,12 @@ The following attributes are exported: * `id` - The ID of the mount target * `dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html) * `network_interface_id` - The ID of the network interface that Amazon EFS created when it created the mount target. + + +## Import + +EFS Mount Targets can be imported using the `id`, e.g. + +``` +$ terraform import aws_efs_mount_target.alpha fsmt-52a643fb +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown index 25bbe7f66..ab029d374 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown @@ -37,3 +37,12 @@ The following attributes are exported: * `name` * `description` + + +## Import + +Elastic Beanstalk Applications can be imported using the `name`, e.g. + +``` +$ terraform import aws_elastic_beanstalk_application.tf_test tf-test-name +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown index bdd008477..d5492517f 100644 --- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown +++ b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown @@ -95,3 +95,12 @@ The following attributes are exported: [1]: http://docs.aws.amazon.com/fr_fr/elasticbeanstalk/latest/dg/concepts.platforms.html + + +## Import + +Elastic Beanstalk Environments can be imported using the `id`, e.g. + +``` +$ terraform import aws_elastic_beanstalk_environment.prodenv e-rpqsewtp2j +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown index 726dff4bb..e2c58427a 100644 --- a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown @@ -46,3 +46,12 @@ Parameter blocks support the following: The following attributes are exported: * `id` - The ElastiCache parameter group name. + + +## Import + +ElastiCache Parameter Groups can be imported using the `name`, e.g. 
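A minimal sketch of the corresponding configuration for the import below; the `family` and `description` values are assumptions:

```
resource "aws_elasticache_parameter_group" "default" {
  name        = "redis-params"             # matches the import example
  family      = "redis2.8"                 # assumed engine family
  description = "Imported parameter group" # assumed description
}
```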
+ +``` +$ terraform import aws_elasticache_parameter_group.default redis-params +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown index 6676fe83b..b3a15eac6 100644 --- a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown @@ -55,3 +55,11 @@ The following attributes are exported: * `name` * `subnet_ids` + +## Import + +ElastiCache Subnet Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_elasticache_subnet_group.bar tf-test-cache-subnet +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/elb.html.markdown b/website/source/docs/providers/aws/r/elb.html.markdown index c8e1eabdc..18486b528 100644 --- a/website/source/docs/providers/aws/r/elb.html.markdown +++ b/website/source/docs/providers/aws/r/elb.html.markdown @@ -128,3 +128,11 @@ The following attributes are exported: part of your inbound rules for your load balancer's back-end application instances. Only available on ELBs launched in a VPC. * `zone_id` - The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record) + +## Import + +ELBs can be imported using the `name`, e.g. + +``` +$ terraform import aws_elb.bar elb-production-12345 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/flow_log.html.markdown b/website/source/docs/providers/aws/r/flow_log.html.markdown index 3f73634e7..d0ea141ef 100644 --- a/website/source/docs/providers/aws/r/flow_log.html.markdown +++ b/website/source/docs/providers/aws/r/flow_log.html.markdown @@ -82,3 +82,11 @@ The following arguments are supported: The following attributes are exported: * `id` - The Flow Log ID + +## Import + +Flow Logs can be imported using the `id`, e.g. + +``` +$ terraform import aws_flow_log.test_flow_log fl-1a2b3c4d +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/glacier_vault.html.markdown b/website/source/docs/providers/aws/r/glacier_vault.html.markdown index c0b6d8685..734c93b1d 100644 --- a/website/source/docs/providers/aws/r/glacier_vault.html.markdown +++ b/website/source/docs/providers/aws/r/glacier_vault.html.markdown @@ -72,3 +72,11 @@ The following attributes are exported: * `location` - The URI of the vault that was created. * `arn` - The ARN of the vault. + +## Import + +Glacier Vaults can be imported using the `name`, e.g. + +``` +$ terraform import aws_glacier_vault.archive my_archive +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown b/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown index 072f2c48c..208e1e450 100644 --- a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown +++ b/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown @@ -49,3 +49,12 @@ The following attributes are exported: * `expire_passwords` - Indicates whether passwords in the account expire. Returns `true` if `max_password_age` contains a value greater than `0`. Returns `false` if it is `0` or _not present_. + + +## Import + +IAM Account Password Policy can be imported using the word `iam-account-password-policy`, e.g.
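For illustration only, a policy block such an import might adopt; every value here is an assumption:

```
resource "aws_iam_account_password_policy" "strict" {
  # all values below are illustrative assumptions
  minimum_password_length        = 8
  require_numbers                = true
  require_symbols                = true
  allow_users_to_change_password = true
}
```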
+ +``` +$ terraform import aws_iam_account_password_policy.strict iam-account-password-policy +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/iam_group.html.markdown b/website/source/docs/providers/aws/r/iam_group.html.markdown index 692dc3d49..3440a25dd 100644 --- a/website/source/docs/providers/aws/r/iam_group.html.markdown +++ b/website/source/docs/providers/aws/r/iam_group.html.markdown @@ -37,3 +37,11 @@ The following attributes are exported: * `unique_id` - The [unique ID][1] assigned by AWS. [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#GUIDs + +## Import + +IAM Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_iam_group.developers developers +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown index 49fe6ec73..f656cd86a 100644 --- a/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown +++ b/website/source/docs/providers/aws/r/iam_saml_provider.html.markdown @@ -32,3 +32,11 @@ The following attributes are exported: * `arn` - The ARN assigned by AWS for this provider. * `valid_until` - The expiration date and time for the SAML provider in RFC1123 format, e.g. `Mon, 02 Jan 2006 15:04:05 MST`. + +## Import + +IAM SAML Providers can be imported using the `arn`, e.g. + +``` +$ terraform import aws_iam_saml_provider.default arn:aws:iam::123456789012:saml-provider/SAMLADFS +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/iam_user.html.markdown b/website/source/docs/providers/aws/r/iam_user.html.markdown index ef53316e8..acc79b749 100644 --- a/website/source/docs/providers/aws/r/iam_user.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user.html.markdown @@ -57,3 +57,12 @@ The following attributes are exported: * `arn` - The ARN assigned by AWS for this user. [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#GUIDs + + +## Import + +IAM Users can be imported using the `name`, e.g. + +``` +$ terraform import aws_iam_user.lb loadbalancer +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index 227a5f3d5..5681109b5 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -159,3 +159,12 @@ The following attributes are exported: * `security_groups` - The associated security groups. * `vpc_security_group_ids` - The associated security groups in non-default VPC * `subnet_id` - The VPC subnet ID. + + +## Import + +Instances can be imported using the `id`, e.g. + +``` +$ terraform import aws_instance.web i-12345678 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/internet_gateway.html.markdown b/website/source/docs/providers/aws/r/internet_gateway.html.markdown index cefedc6ad..7b3cf86d1 100644 --- a/website/source/docs/providers/aws/r/internet_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/internet_gateway.html.markdown @@ -47,3 +47,11 @@ The following attributes are exported: * `id` - The ID of the Internet Gateway. + +## Import + +Internet Gateways can be imported using the `id`, e.g. 
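A hedged sketch of the matching resource block for the import below; it assumes an `aws_vpc.main` resource defined elsewhere:

```
resource "aws_internet_gateway" "gw" {
  vpc_id = "${aws_vpc.main.id}" # assumes an aws_vpc.main resource exists elsewhere
}
```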
+ +``` +$ terraform import aws_internet_gateway.gw igw-c0a643a9 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/key_pair.html.markdown b/website/source/docs/providers/aws/r/key_pair.html.markdown index df2e3200f..3bb37c948 100644 --- a/website/source/docs/providers/aws/r/key_pair.html.markdown +++ b/website/source/docs/providers/aws/r/key_pair.html.markdown @@ -40,3 +40,11 @@ The following attributes are exported: * `key_name` - The key pair name. * `fingerprint` - The MD5 public key fingerprint as specified in section 4 of RFC 4716. + +## Import + +Key Pairs can be imported using the `key_name`, e.g. + +``` +$ terraform import aws_key_pair.deployer deployer-key +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/kms_key.html.markdown b/website/source/docs/providers/aws/r/kms_key.html.markdown index 751babb71..9864a8a53 100644 --- a/website/source/docs/providers/aws/r/kms_key.html.markdown +++ b/website/source/docs/providers/aws/r/kms_key.html.markdown @@ -39,3 +39,11 @@ The following attributes are exported: * `arn` - The Amazon Resource Name (ARN) of the key. * `key_id` - The globally unique identifier for the key. + +## Import + +KMS Keys can be imported using the `id`, e.g. + +``` +$ terraform import aws_kms_key.a arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown index ef95b7831..9466565da 100644 --- a/website/source/docs/providers/aws/r/lambda_function.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_function.html.markdown @@ -79,3 +79,11 @@ resource "aws_lambda_function" "test_lambda" { [5]: https://docs.aws.amazon.com/lambda/latest/dg/limits.html [6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime [7]: http://docs.aws.amazon.com/lambda/latest/dg/vpc.html + +## Import + +Lambda Functions can be imported using the `function_name`, e.g. + +``` +$ terraform import aws_lambda_function.tesr_lambda my_test_lambda_function +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/launch_configuration.html.markdown b/website/source/docs/providers/aws/r/launch_configuration.html.markdown index e35fc51f6..940538b0b 100644 --- a/website/source/docs/providers/aws/r/launch_configuration.html.markdown +++ b/website/source/docs/providers/aws/r/launch_configuration.html.markdown @@ -214,3 +214,11 @@ The following attributes are exported: [1]: /docs/providers/aws/r/autoscaling_group.html [2]: /docs/configuration/resources.html#lifecycle [3]: /docs/providers/aws/r/spot_instance_request.html + +## Import + +Launch configurations can be imported using the `name`, e.g. + +``` +$ terraform import aws_launch_configuration.as_conf terraform-lg-123456 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/nat_gateway.html.markdown b/website/source/docs/providers/aws/r/nat_gateway.html.markdown index 5f831c043..d87913a3e 100644 --- a/website/source/docs/providers/aws/r/nat_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/nat_gateway.html.markdown @@ -49,3 +49,11 @@ The following attributes are exported: * `network_interface_id` - The ENI ID of the network interface created by the NAT gateway. * `private_ip` - The private IP address of the NAT Gateway. * `public_ip` - The public IP address of the NAT Gateway. 
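To make the exported attributes concrete, a hedged example of wiring the NAT gateway into a default route; `aws_route_table.private` is an assumed resource, not part of the original docs:

```
resource "aws_route" "private_default" {
  route_table_id         = "${aws_route_table.private.id}" # assumed route table
  destination_cidr_block = "0.0.0.0/0"
  nat_gateway_id         = "${aws_nat_gateway.private_gw.id}"
}
```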
+ +## Import + +NAT Gateways can be imported using the `id`, e.g. + +``` +$ terraform import aws_nat_gateway.private_gw nat-05dba92075d71c408 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/network_acl.html.markdown b/website/source/docs/providers/aws/r/network_acl.html.markdown index 1dc11e902..e78fa6783 100644 --- a/website/source/docs/providers/aws/r/network_acl.html.markdown +++ b/website/source/docs/providers/aws/r/network_acl.html.markdown @@ -73,3 +73,11 @@ The following attributes are exported: * `id` - The ID of the network ACL + +## Import + +Network ACLs can be imported using the `id`, e.g. + +``` +$ terraform import aws_network_acl.main acl-7aaabd18 +``` diff --git a/website/source/docs/providers/aws/r/network_interface.markdown b/website/source/docs/providers/aws/r/network_interface.markdown index a04b3b191..e52033ed3 100644 --- a/website/source/docs/providers/aws/r/network_interface.markdown +++ b/website/source/docs/providers/aws/r/network_interface.markdown @@ -53,3 +53,12 @@ The following attributes are exported: * `source_dest_check` - Whether source destination checking is enabled * `tags` - Tags assigned to the ENI. + + +## Import + +Network Interfaces can be imported using the `id`, e.g. + +``` +$ terraform import aws_network_interface.test eni-e5aa89a3 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/placement_group.html.markdown b/website/source/docs/providers/aws/r/placement_group.html.markdown index 1e7c024fa..56cafeb7c 100644 --- a/website/source/docs/providers/aws/r/placement_group.html.markdown +++ b/website/source/docs/providers/aws/r/placement_group.html.markdown @@ -32,3 +32,11 @@ The following arguments are supported: The following attributes are exported: * `id` - The name of the placement group. + +## Import + +Placement groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_placement_group.prod_pg production-placement-group +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index f6b8f9b40..4d628bdb5 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -109,3 +109,11 @@ The following attributes are exported: [2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html [3]: /docs/providers/aws/r/rds_cluster_instance.html [4]: http://docs.aws.amazon.com/fr_fr/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html + +## Import + +RDS Clusters can be imported using the `cluster_identifier`, e.g. + +``` +$ terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index dfca6cdbc..7408f90d5 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -90,3 +90,11 @@ this instance is a read replica [4]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html [5]: /docs/configuration/resources.html#count [6]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html + +## Import + +RDS Cluster Instances can be imported using the `identifier`, e.g.
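A minimal sketch of the instance configuration the import below could map onto; the cluster reference and instance class are assumptions:

```
resource "aws_rds_cluster_instance" "prod_instance_1" {
  identifier         = "aurora-cluster-instance-1"     # matches the import example
  cluster_identifier = "${aws_rds_cluster.default.id}" # assumed cluster resource
  instance_class     = "db.r3.large"                   # assumed instance class
}
```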
+ +``` +$ terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown index 617ec6f35..085dd38a6 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown @@ -53,3 +53,11 @@ The following attributes are exported: * `id` - The db cluster parameter group name. * `arn` - The ARN of the db cluster parameter group. + +## Import + +RDS Cluster Parameter Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_rds_cluster_parameter_group.cluster_pg production-pg-1 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown index fd57c192d..daa00fdfd 100644 --- a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown @@ -82,3 +82,11 @@ The following attributes are exported: * `cluster_subnet_group_name` - The name of a cluster subnet group to be associated with this cluster * `cluster_public_key` - The public key for the cluster * `cluster_revision_number` - The specific revision number of the database in the cluster + +## Import + +Redshift Clusters can be imported using the `cluster_identifier`, e.g. + +``` +$ terraform import aws_redshift_cluster.myprodcluster tf-redshift-cluster-12345 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown index 896ae9f96..3b60b7a29 100644 --- a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown @@ -50,3 +50,11 @@ You can read more about the parameters that Redshift supports in the [documentat The following attributes are exported: * `id` - The Redshift parameter group name. + +## Import + +Redshift Parameter Groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_redshift_parameter_group.paramgroup1 parameter-group-test-terraform +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown index 5c73b5f0b..a25a9c387 100644 --- a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown @@ -56,3 +56,10 @@ The following attributes are exported: * `id` - The Redshift Subnet group ID. +## Import + +Redshift subnet groups can be imported using the `name`, e.g.
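A hedged configuration sketch for the import below; the description and subnet references are assumptions:

```
resource "aws_redshift_subnet_group" "testgroup1" {
  name        = "test-cluster-subnet-group"                     # matches the import example
  description = "Test cluster subnet group"                     # assumed description
  subnet_ids  = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"] # assumed subnets
}
```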
+ +``` +$ terraform import aws_redshift_subnet_group.testgroup1 test-cluster-subnet-group +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown b/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown index cf6ddb59e..f74adb438 100644 --- a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown +++ b/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown @@ -42,3 +42,13 @@ The following attributes are exported: * `id` - The delegation set ID * `name_servers` - A list of authoritative name servers for the hosted zone (effectively a list of NS records). + + + +## Import + +Route53 Delegation Sets can be imported using the `delegation set id`, e.g. + +``` +$ terraform import aws_route53_delegation_set.set1 N1PA6795SAMPLE +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown index 357972eea..6d2c66a5c 100644 --- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown +++ b/website/source/docs/providers/aws/r/route53_health_check.html.markdown @@ -56,3 +56,11 @@ The following arguments are supported: At least one of either `fqdn` or `ip_address` must be specified. + +## Import + +Route53 Health Checks can be imported using the `health check id`, e.g. + +``` +$ terraform import aws_route53_health_check.http_check abcdef11-2222-3333-4444-555555fedcba +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/route53_zone.html.markdown b/website/source/docs/providers/aws/r/route53_zone.html.markdown index 79543a46f..061946a9c 100644 --- a/website/source/docs/providers/aws/r/route53_zone.html.markdown +++ b/website/source/docs/providers/aws/r/route53_zone.html.markdown @@ -69,3 +69,12 @@ The following attributes are exported: * `zone_id` - The Hosted Zone ID. This can be referenced by zone records. * `name_servers` - A list of name servers in associated (or default) delegation set. Find more about delegation sets in [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html). + + +## Import + +Route53 Zones can be imported using the `zone id`, e.g. + +``` +$ terraform import aws_route53_zone.myzone Z1D633PJN98FT9 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/route_table.html.markdown b/website/source/docs/providers/aws/r/route_table.html.markdown index 0b9c036c1..dae3c2c19 100644 --- a/website/source/docs/providers/aws/r/route_table.html.markdown +++ b/website/source/docs/providers/aws/r/route_table.html.markdown @@ -61,3 +61,11 @@ The following attributes are exported: attribute once the route resource is created. * `id` - The ID of the routing table + +## Import + +Route Tables can be imported using the `route table id`, e.g. + +``` +$ terraform import aws_route_table.public_rt rtb-22574640 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown index 470e6e68c..15e5a9ed8 100644 --- a/website/source/docs/providers/aws/r/security_group.html.markdown +++ b/website/source/docs/providers/aws/r/security_group.html.markdown @@ -152,3 +152,12 @@ The following attributes are exported: * `description` - The description of the security group * `ingress` - The ingress rules. See above for more. 
* `egress` - The egress rules. See above for more. + + +## Import + +Security Groups can be imported using the `security group id`, e.g. + +``` +$ terraform import aws_security_group.elb_sg sg-903004f8 +``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown index e63bae36d..d19d47981 100644 --- a/website/source/docs/providers/aws/r/simpledb_domain.html.markdown +++ b/website/source/docs/providers/aws/r/simpledb_domain.html.markdown @@ -35,5 +35,5 @@ The following attributes are exported: SimpleDB Domains can be imported using the `name`, e.g. ``` -terraform import aws_simpledb_domain.users users +$ terraform import aws_simpledb_domain.users users ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sns_topic.html.markdown b/website/source/docs/providers/aws/r/sns_topic.html.markdown index c0094f2bf..093c44fc1 100644 --- a/website/source/docs/providers/aws/r/sns_topic.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic.html.markdown @@ -39,5 +39,5 @@ The following attributes are exported: SNS Topics can be imported using the `topic arn`, e.g. ``` -terraform import aws_sns_topic.user_updates arn:aws:sns:us-west-2:0123456789012:my-topic +$ terraform import aws_sns_topic.user_updates arn:aws:sns:us-west-2:0123456789012:my-topic ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown index 0bc6e5b89..85ebe3148 100644 --- a/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown +++ b/website/source/docs/providers/aws/r/sns_topic_subscription.html.markdown @@ -102,5 +102,5 @@ The following attributes are exported: SNS Topic Subscriptions can be imported using the `subscription arn`, e.g. ``` -terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f +$ terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/sqs_queue.html.markdown b/website/source/docs/providers/aws/r/sqs_queue.html.markdown index 1140c07e1..b741fdd76 100644 --- a/website/source/docs/providers/aws/r/sqs_queue.html.markdown +++ b/website/source/docs/providers/aws/r/sqs_queue.html.markdown @@ -46,5 +46,5 @@ The following attributes are exported: SQS Queues can be imported using the `queue url`, e.g. ``` -terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue +$ terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/subnet.html.markdown b/website/source/docs/providers/aws/r/subnet.html.markdown index ba96ba77f..388514459 100644 --- a/website/source/docs/providers/aws/r/subnet.html.markdown +++ b/website/source/docs/providers/aws/r/subnet.html.markdown @@ -51,5 +51,5 @@ The following attributes are exported: Subnets can be imported using the `subnet id`, e.g. 
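As an illustrative sketch, the kind of subnet block the import below would populate; the VPC reference and CIDR are assumptions:

```
resource "aws_subnet" "public_subnet" {
  vpc_id     = "${aws_vpc.main.id}" # assumed VPC resource
  cidr_block = "10.0.1.0/24"        # assumed CIDR
}
```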
``` -terraform import aws_subnet.public_subnet subnet-9d4a7b6c +$ terraform import aws_subnet.public_subnet subnet-9d4a7b6c ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index fafcc0097..0125ef3d2 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -70,5 +70,5 @@ The following attributes are exported: VPNs can be imported using the `vpn id`, e.g. ``` -terraform import aws_vpn.test_vpn vpc-a01106c2 +$ terraform import aws_vpn.test_vpn vpc-a01106c2 ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown index 5e030c509..e30ae0b67 100644 --- a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown @@ -68,5 +68,5 @@ official [AWS User Guide](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide VPC DHCP Options can be imported using the `dhcp options id`, e.g. ``` -terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb +$ terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown index 6bdf31ad8..d7531a356 100644 --- a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown @@ -43,5 +43,5 @@ The following attributes are exported: VPN Endpoints can be imported using the `vpc endpoint id`, e.g. ``` -terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 +$ terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57 ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpc_peering.html.markdown b/website/source/docs/providers/aws/r/vpc_peering.html.markdown index c20b75bcb..c083591f4 100644 --- a/website/source/docs/providers/aws/r/vpc_peering.html.markdown +++ b/website/source/docs/providers/aws/r/vpc_peering.html.markdown @@ -72,5 +72,5 @@ If you are not the owner of both VPCs, or do not enable auto_accept you will sti VPC Peering resources can be imported using the `vpc peering id`, e.g. ``` -terraform import aws_vpc_peering_connection.test_connection pcx-111aaa111 +$ terraform import aws_vpc_peering_connection.test_connection pcx-111aaa111 ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpn_connection.html.markdown b/website/source/docs/providers/aws/r/vpn_connection.html.markdown index 4d1dbdf21..878a7346f 100644 --- a/website/source/docs/providers/aws/r/vpn_connection.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_connection.html.markdown @@ -68,5 +68,5 @@ The following attributes are exported: VPN Connections can be imported using the `vpn connection id`, e.g. 
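A minimal sketch of a VPN connection block to pair with the import below; the gateway references are assumptions:

```
resource "aws_vpn_connection" "testvpnconnection" {
  vpn_gateway_id      = "${aws_vpn_gateway.vpn_gw.id}"           # assumed gateway
  customer_gateway_id = "${aws_customer_gateway.customer_gw.id}" # assumed gateway
  type                = "ipsec.1"
  static_routes_only  = true
}
```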
``` -terraform import aws_vpn_connection.testvpnconnection vpn-40f41529 +$ terraform import aws_vpn_connection.testvpnconnection vpn-40f41529 ``` \ No newline at end of file diff --git a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown index 956cfc3c2..3d478e880 100644 --- a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown +++ b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown @@ -42,5 +42,5 @@ The following attributes are exported: VPN Gateways can be imported using the `vpn gateway id`, e.g. ``` -terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3 +$ terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3 ``` \ No newline at end of file From f7fa4610cd563ac5bcce85b70349648a7ac0faa1 Mon Sep 17 00:00:00 2001 From: Golo Roden Date: Thu, 21 Jul 2016 00:33:05 +0200 Subject: [PATCH 0351/1238] Add note that provider uses API v1. (#7727) --- website/source/docs/providers/dnsimple/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/dnsimple/index.html.markdown b/website/source/docs/providers/dnsimple/index.html.markdown index 23828c6d1..2dcca6095 100644 --- a/website/source/docs/providers/dnsimple/index.html.markdown +++ b/website/source/docs/providers/dnsimple/index.html.markdown @@ -33,7 +33,7 @@ resource "dnsimple_record" "www" { The following arguments are supported: -* `token` - (Required) The DNSimple API token. It must be provided, but it can also be sourced from the `DNSIMPLE_TOKEN` environment variable. +* `token` - (Required) The DNSimple API token. It must be provided, but it can also be sourced from the `DNSIMPLE_TOKEN` environment variable. Please note that this must be an API v1 token. * `email` - (Required) The email associated with the token. It must be provided, but it can also be sourced from the `DNSIMPLE_EMAIL` environment variable. From 40782219573726a6a5a9f8bff3be54e47bdaf45d Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 20 Jul 2016 17:49:57 -0500 Subject: [PATCH 0352/1238] provider/aws: Clean up aws config path a bit (#7672) Rearrange client setup, and remove the extraneous log lines we make per connection. There's no need to log one line per API client - we're just setting up structs for most of them. Since this collapses the file down quite a bit, switch to alphabetized client setup, since previously there wasn't much of an order to things. --- builtin/providers/aws/config.go | 162 +++++++++----------------------- 1 file changed, 43 insertions(+), 119 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index ab50b8ae2..823e9ecd5 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -179,19 +179,6 @@ func (c *Config) Client() (interface{}, error) { sess := session.New(awsConfig) sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent) - log.Println("[INFO] Initializing IAM Connection") - awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) - client.iamconn = iam.New(awsIamSess) - - log.Println("[INFO] Initializing STS connection") - client.stsconn = sts.New(sess) - - err = c.ValidateCredentials(client.stsconn) - if err != nil { - errs = append(errs, err) - return nil, &multierror.Error{Errors: errs} - } - // Some services exist only in us-east-1, e.g. 
because they manage // resources that can span across multiple regions, or because // signature format v4 requires region to be us-east-1 for global @@ -199,128 +186,65 @@ func (c *Config) Client() (interface{}, error) { // http://docs.aws.amazon.com/general/latest/gr/sigv4_changes.html usEast1Sess := sess.Copy(&aws.Config{Region: aws.String("us-east-1")}) + // Some services have user-configurable endpoints + awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) + awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) + awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) + dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) + kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) + + client.stsconn = sts.New(sess) + err = c.ValidateCredentials(client.stsconn) + if err != nil { + errs = append(errs, err) + return nil, &multierror.Error{Errors: errs} + } accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName) if err == nil { client.accountid = accountId } - log.Println("[INFO] Initializing DynamoDB connection") - dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) - client.dynamodbconn = dynamodb.New(dynamoSess) - - log.Println("[INFO] Initializing Cloudfront connection") - client.cloudfrontconn = cloudfront.New(sess) - - log.Println("[INFO] Initializing ELB connection") - awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) - client.elbconn = elb.New(awsElbSess) - - log.Println("[INFO] Initializing S3 connection") - client.s3conn = s3.New(sess) - - log.Println("[INFO] Initializing SES connection") - client.sesConn = ses.New(sess) - - log.Println("[INFO] Initializing SimpleDB connection") - client.simpledbconn = simpledb.New(sess) - - log.Println("[INFO] Initializing SQS connection") - client.sqsconn = sqs.New(sess) - - log.Println("[INFO] Initializing SNS connection") - client.snsconn = sns.New(sess) - - log.Println("[INFO] Initializing RDS Connection") - client.rdsconn = rds.New(sess) - - log.Println("[INFO] Initializing Kinesis Connection") - kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) - client.kinesisconn = kinesis.New(kinesisSess) - - log.Println("[INFO] Initializing Elastic Beanstalk Connection") - client.elasticbeanstalkconn = elasticbeanstalk.New(sess) - - log.Println("[INFO] Initializing Elastic Transcoder Connection") - client.elastictranscoderconn = elastictranscoder.New(sess) - authErr := c.ValidateAccountId(client.accountid) if authErr != nil { errs = append(errs, authErr) } - log.Println("[INFO] Initializing Kinesis Firehose Connection") - client.firehoseconn = firehose.New(sess) - - log.Println("[INFO] Initializing AutoScaling connection") - client.autoscalingconn = autoscaling.New(sess) - - log.Println("[INFO] Initializing EC2 Connection") - - awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) - client.ec2conn = ec2.New(awsEc2Sess) - - log.Println("[INFO] Initializing ECR Connection") - client.ecrconn = ecr.New(sess) - - log.Println("[INFO] Initializing API Gateway") client.apigateway = apigateway.New(sess) - - log.Println("[INFO] Initializing ECS Connection") - client.ecsconn = ecs.New(sess) - - log.Println("[INFO] Initializing EFS Connection") - client.efsconn = efs.New(sess) - - log.Println("[INFO] Initializing ElasticSearch Connection") - client.esconn = elasticsearch.New(sess) - - log.Println("[INFO] Initializing EMR Connection") - 
client.emrconn = emr.New(sess) - - log.Println("[INFO] Initializing Route 53 connection") - client.r53conn = route53.New(usEast1Sess) - - log.Println("[INFO] Initializing Elasticache Connection") - client.elasticacheconn = elasticache.New(sess) - - log.Println("[INFO] Initializing Lambda Connection") - client.lambdaconn = lambda.New(sess) - - log.Println("[INFO] Initializing Cloudformation Connection") + client.autoscalingconn = autoscaling.New(sess) client.cfconn = cloudformation.New(sess) - - log.Println("[INFO] Initializing CloudWatch SDK connection") - client.cloudwatchconn = cloudwatch.New(sess) - - log.Println("[INFO] Initializing CloudWatch Events connection") - client.cloudwatcheventsconn = cloudwatchevents.New(sess) - - log.Println("[INFO] Initializing CloudTrail connection") + client.cloudfrontconn = cloudfront.New(sess) client.cloudtrailconn = cloudtrail.New(sess) - - log.Println("[INFO] Initializing CloudWatch Logs connection") + client.cloudwatchconn = cloudwatch.New(sess) + client.cloudwatcheventsconn = cloudwatchevents.New(sess) client.cloudwatchlogsconn = cloudwatchlogs.New(sess) - - log.Println("[INFO] Initializing OpsWorks Connection") - client.opsworksconn = opsworks.New(usEast1Sess) - - log.Println("[INFO] Initializing Directory Service connection") - client.dsconn = directoryservice.New(sess) - - log.Println("[INFO] Initializing Glacier connection") - client.glacierconn = glacier.New(sess) - - log.Println("[INFO] Initializing CodeDeploy Connection") - client.codedeployconn = codedeploy.New(sess) - - log.Println("[INFO] Initializing CodeCommit SDK connection") client.codecommitconn = codecommit.New(usEast1Sess) - - log.Println("[INFO] Initializing Redshift SDK connection") - client.redshiftconn = redshift.New(sess) - - log.Println("[INFO] Initializing KMS connection") + client.codedeployconn = codedeploy.New(sess) + client.dsconn = directoryservice.New(sess) + client.dynamodbconn = dynamodb.New(dynamoSess) + client.ec2conn = ec2.New(awsEc2Sess) + client.ecrconn = ecr.New(sess) + client.ecsconn = ecs.New(sess) + client.efsconn = efs.New(sess) + client.elasticacheconn = elasticache.New(sess) + client.elasticbeanstalkconn = elasticbeanstalk.New(sess) + client.elastictranscoderconn = elastictranscoder.New(sess) + client.elbconn = elb.New(awsElbSess) + client.emrconn = emr.New(sess) + client.esconn = elasticsearch.New(sess) + client.firehoseconn = firehose.New(sess) + client.glacierconn = glacier.New(sess) + client.iamconn = iam.New(awsIamSess) + client.kinesisconn = kinesis.New(kinesisSess) client.kmsconn = kms.New(sess) + client.lambdaconn = lambda.New(sess) + client.opsworksconn = opsworks.New(usEast1Sess) + client.r53conn = route53.New(usEast1Sess) + client.rdsconn = rds.New(sess) + client.redshiftconn = redshift.New(sess) + client.s3conn = s3.New(sess) + client.sesConn = ses.New(sess) + client.snsconn = sns.New(sess) + client.sqsconn = sqs.New(sess) } if len(errs) > 0 { From a2c5b31490eac3cb72621298f1e3ee48d0312ce1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 23:52:34 +0100 Subject: [PATCH 0353/1238] provider/aws: Support kms_key_id for `aws_rds_cluster` (#7662) * provider/aws: Support kms_key_id for `aws_rds_cluster` ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRDSCluster_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRDSCluster_ -timeout 120m === RUN TestAccAWSRDSCluster_basic --- PASS: TestAccAWSRDSCluster_basic (127.57s) === RUN TestAccAWSRDSCluster_kmsKey --- PASS: TestAccAWSRDSCluster_kmsKey (323.72s) === RUN TestAccAWSRDSCluster_encrypted --- PASS: TestAccAWSRDSCluster_encrypted (173.25s) === RUN TestAccAWSRDSCluster_backupsUpdate --- PASS: TestAccAWSRDSCluster_backupsUpdate (264.07s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 888.638s ``` * provider/aws: Add KMS Key ID to `aws_rds_cluster_instance` ``` ``` --- .../providers/aws/resource_aws_rds_cluster.go | 12 +++ .../aws/resource_aws_rds_cluster_instance.go | 14 ++++ .../resource_aws_rds_cluster_instance_test.go | 82 +++++++++++++++++++ .../aws/resource_aws_rds_cluster_test.go | 58 +++++++++++++ .../providers/aws/r/rds_cluster.html.markdown | 1 + .../aws/r/rds_cluster_instance.html.markdown | 2 + 6 files changed, 169 insertions(+) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 60981c1ae..348018000 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -196,6 +196,13 @@ func resourceAwsRDSCluster() *schema.Resource { }, }, + "kms_key_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + "tags": tagsSchema(), }, } @@ -341,6 +348,10 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error createOpts.PreferredMaintenanceWindow = aws.String(v.(string)) } + if attr, ok := d.GetOk("kms_key_id"); ok { + createOpts.KmsKeyId = aws.String(attr.(string)) + } + log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts) resp, err := conn.CreateDBCluster(createOpts) if err != nil { @@ -431,6 +442,7 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error { d.Set("backup_retention_period", dbc.BackupRetentionPeriod) d.Set("preferred_backup_window", dbc.PreferredBackupWindow) d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow) + d.Set("kms_key_id", dbc.KmsKeyId) var vpcg []string for _, g := range dbc.VpcSecurityGroups { diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 5289e4782..795665685 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -83,6 +83,20 @@ func resourceAwsRDSClusterInstance() *schema.Resource { Computed: true, }, + "kms_key_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + + "storage_encrypted": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: true, + }, + "tags": tagsSchema(), }, } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index da4e1f9fe..81e79f488 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -2,6 +2,7 @@ package aws import ( "fmt" + "regexp" "strings" "testing" "time" @@ -34,6 +35,27 @@ func TestAccAWSRDSClusterInstance_basic(t *testing.T) { }) } +func TestAccAWSRDSClusterInstance_kmsKey(t *testing.T) { + var v rds.DBInstance + keyRegex := regexp.MustCompile("^arn:aws:kms:") + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterInstanceConfigKmsKey(acctest.RandInt()), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), + resource.TestMatchResourceAttr( + "aws_rds_cluster_instance.cluster_instances", "kms_key_id", keyRegex), + ), + }, + }, + }) +} + // https://github.com/hashicorp/terraform/issues/5350 func TestAccAWSRDSClusterInstance_disappears(t *testing.T) { var v rds.DBInstance @@ -199,3 +221,63 @@ resource "aws_db_parameter_group" "bar" { } `, n, n, n) } + +func testAccAWSClusterInstanceConfigKmsKey(n int) string { + return fmt.Sprintf(` + +resource "aws_kms_key" "foo" { + description = "Terraform acc test %d" + policy = < Date: Wed, 20 Jul 2016 23:53:26 +0100 Subject: [PATCH 0354/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d943e9c7..e26ce1abe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -114,6 +114,7 @@ IMPROVEMENTS: * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs [GH-7470] * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition [GH-7653] * provider/aws: Support Tags on `aws_rds_cluster` [GH-7695] + * provider/aws: Support kms_key_id for `aws_rds_cluster` [GH-7662] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 50959a654c9ba372b910d81662490210c97b8b4b Mon Sep 17 00:00:00 2001 From: David Glasser Date: Wed, 20 Jul 2016 15:55:05 -0700 Subject: [PATCH 0355/1238] command: Remove second DefaultDataDirectory const (#7666) --- command/command.go | 4 ---- command/init.go | 2 +- command/meta.go | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/command/command.go b/command/command.go index 80e82e78c..d9ad0cf30 100644 --- a/command/command.go +++ b/command/command.go @@ -22,10 +22,6 @@ const DefaultVarsFilename = "terraform.tfvars" // DefaultBackupExtension is added to the state file to form the path const DefaultBackupExtension = ".backup" -// DefaultDataDirectory is the directory where local state is stored -// by default. -const DefaultDataDirectory = ".terraform" - // DefaultParallelism is the limit Terraform places on total parallel // operations as it walks the dependency graph. const DefaultParallelism = 10 diff --git a/command/init.go b/command/init.go index 627fcb229..bcc339bee 100644 --- a/command/init.go +++ b/command/init.go @@ -58,7 +58,7 @@ func (c *InitCommand) Run(args []string) int { // Set the state out path to be the path requested for the module // to be copied. This ensures any remote states gets setup in the // proper directory. - c.Meta.dataDir = filepath.Join(path, DefaultDataDirectory) + c.Meta.dataDir = filepath.Join(path, DefaultDataDir) source := args[0] diff --git a/command/meta.go b/command/meta.go index 3b0896352..69f3e3e5c 100644 --- a/command/meta.go +++ b/command/meta.go @@ -174,7 +174,7 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) { // DataDir returns the directory where local data will be stored. 
func (m *Meta) DataDir() string { - dataDir := DefaultDataDirectory + dataDir := DefaultDataDir if m.dataDir != "" { dataDir = m.dataDir } From 28d10d6eb5770c068c7ea1c9c000e01c755270ce Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 20 Jul 2016 23:55:44 +0100 Subject: [PATCH 0356/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e26ce1abe..1092f3989 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ IMPROVEMENTS: * core: The `jsonencode` interpolation function now supports encoding lists and maps [GH-6749] * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. [GH-6923] * core: Support `.` in map keys [GH-7654] + * command: Remove second DefaultDataDirectory const [GH-7666] * provider/aws: Add `dns_name` to `aws_efs_mount_target` [GH-7428] * provider/aws: Add `option_settings` to `aws_db_option_group` [GH-6560] * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster [GH-6795] From ecb4b5aada4a1df5553a79a2efb28109c9126b7f Mon Sep 17 00:00:00 2001 From: Jan Schumann Date: Thu, 21 Jul 2016 01:29:33 +0200 Subject: [PATCH 0357/1238] providers/aws: Opsworks permission resource (#6304) * add opsworks permission resource * add docs * remove permission from state if the permission object could not be found * remove nil validate function. validation is done in schema.Resource. * add id to the list of exported values * renge over permission to check that we have found got the correct one * removed comment * removed set id * fix unknown region us-east-1c * add user_profile resource * add docs * add default value --- builtin/providers/aws/provider.go | 2 + .../aws/resource_aws_opsworks_permission.go | 155 ++++++++++++++++++ .../resource_aws_opsworks_permission_test.go | 41 +++++ .../aws/resource_aws_opsworks_stack_test.go | 6 +- .../aws/resource_aws_opsworks_user_profile.go | 136 +++++++++++++++ ...resource_aws_opsworks_user_profile_test.go | 37 +++++ .../aws/r/opsworks_permission.html.markdown | 39 +++++ .../aws/r/opsworks_user_profile.html.markdown | 35 ++++ 8 files changed, 448 insertions(+), 3 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_opsworks_permission.go create mode 100644 builtin/providers/aws/resource_aws_opsworks_permission_test.go create mode 100644 builtin/providers/aws/resource_aws_opsworks_user_profile.go create mode 100644 builtin/providers/aws/resource_aws_opsworks_user_profile_test.go create mode 100644 website/source/docs/providers/aws/r/opsworks_permission.html.markdown create mode 100644 website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 6ae9d5320..cfe8033e0 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -234,6 +234,8 @@ func Provider() terraform.ResourceProvider { "aws_opsworks_ganglia_layer": resourceAwsOpsworksGangliaLayer(), "aws_opsworks_custom_layer": resourceAwsOpsworksCustomLayer(), "aws_opsworks_instance": resourceAwsOpsworksInstance(), + "aws_opsworks_user_profile": resourceAwsOpsworksUserProfile(), + "aws_opsworks_permission": resourceAwsOpsworksPermission(), "aws_placement_group": resourceAwsPlacementGroup(), "aws_proxy_protocol_policy": resourceAwsProxyProtocolPolicy(), "aws_rds_cluster": resourceAwsRDSCluster(), diff --git a/builtin/providers/aws/resource_aws_opsworks_permission.go 
b/builtin/providers/aws/resource_aws_opsworks_permission.go new file mode 100644 index 000000000..7493a4c20 --- /dev/null +++ b/builtin/providers/aws/resource_aws_opsworks_permission.go @@ -0,0 +1,155 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsOpsworksPermission() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksPermissionCreate, + Update: resourceAwsOpsworksPermissionCreate, + Delete: resourceAwsOpsworksPermissionDelete, + Read: resourceAwsOpsworksPermissionRead, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "allow_ssh": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "allow_sudo": &schema.Schema{ + Type: schema.TypeBool, + Computed: true, + Optional: true, + }, + "user_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + // one of deny, show, deploy, manage, iam_only + "level": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + expected := [5]string{"deny", "show", "deploy", "manage", "iam_only"} + + found := false + for _, b := range expected { + if b == value { + found = true + } + } + if !found { + errors = append(errors, fmt.Errorf( + "%q has to be one of [deny, show, deploy, manage, iam_only]", k)) + } + return + }, + }, + "stack_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + }, + }, + } +} + +func resourceAwsOpsworksPermissionDelete(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsOpsworksPermissionRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribePermissionsInput{ + IamUserArn: aws.String(d.Get("user_arn").(string)), + StackId: aws.String(d.Get("stack_id").(string)), + } + + log.Printf("[DEBUG] Reading OpsWorks permissions for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) + + resp, err := client.DescribePermissions(req) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + log.Printf("[INFO] Permission not found") + d.SetId("") + return nil + } + } + return err + } + + found := false + id := "" + for _, permission := range resp.Permissions { + id = *permission.IamUserArn + *permission.StackId + + if d.Get("user_arn").(string)+d.Get("stack_id").(string) == id { + found = true + d.SetId(id) + d.Set("id", id) + d.Set("allow_ssh", permission.AllowSsh) + d.Set("allow_sudo", permission.AllowSudo) + d.Set("user_arn", permission.IamUserArn) + d.Set("stack_id", permission.StackId) + } + + } + + if !found { + d.SetId("") + log.Printf("[INFO] The correct permission could not be found for: %s on stack: %s", d.Get("user_arn"), d.Get("stack_id")) + } + + return nil +} + +func resourceAwsOpsworksPermissionCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.SetPermissionInput{ + AllowSudo: aws.Bool(d.Get("allow_sudo").(bool)), + AllowSsh: aws.Bool(d.Get("allow_ssh").(bool)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + StackId:
aws.String(d.Get("stack_id").(string)), + } + + var resp *opsworks.SetPermissionOutput + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + var cerr error + resp, cerr = client.SetPermission(req) + if cerr != nil { + log.Printf("[INFO] client error") + if opserr, ok := cerr.(awserr.Error); ok { + // XXX: handle errors + log.Printf("[ERROR] OpsWorks error: %s message: %s", opserr.Code(), opserr.Message()) + return resource.RetryableError(cerr) + } + return resource.NonRetryableError(cerr) + } + return nil + }) + + if err != nil { + return err + } + + return resourceAwsOpsworksPermissionRead(d, meta) +} diff --git a/builtin/providers/aws/resource_aws_opsworks_permission_test.go b/builtin/providers/aws/resource_aws_opsworks_permission_test.go new file mode 100644 index 000000000..7c17bfc57 --- /dev/null +++ b/builtin/providers/aws/resource_aws_opsworks_permission_test.go @@ -0,0 +1,41 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSOpsworksPermission(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAwsOpsworksPermissionCreate, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_opsworks_permission.tf-acc-perm", "allow_ssh", "true", + ), + resource.TestCheckResourceAttr( + "aws_opsworks_permission.tf-acc-perm", "allow_sudo", "true", + ), + resource.TestCheckResourceAttr( + "aws_opsworks_permission.tf-acc-perm", "level", "iam_only", + ), + ), + }, + }, + }) +} + +var testAccAwsOpsworksPermissionCreate = testAccAwsOpsworksUserProfileCreate + ` +resource "aws_opsworks_permission" "tf-acc-perm" { + stack_id = "${aws_opsworks_stack.tf-acc.id}" + + allow_ssh = true + allow_sudo = true + user_arn = "${aws_opsworks_user_profile.user.user_arn}" + level = "iam_only" +} +` diff --git a/builtin/providers/aws/resource_aws_opsworks_stack_test.go b/builtin/providers/aws/resource_aws_opsworks_stack_test.go index 0a23273df..1bb9bba14 100644 --- a/builtin/providers/aws/resource_aws_opsworks_stack_test.go +++ b/builtin/providers/aws/resource_aws_opsworks_stack_test.go @@ -26,7 +26,7 @@ func TestAccAWSOpsworksStackNoVpc(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccAwsOpsworksStackConfigNoVpcCreate(stackName), - Check: testAccAwsOpsworksStackCheckResourceAttrsCreate("us-east-1c", stackName), + Check: testAccAwsOpsworksStackCheckResourceAttrsCreate("us-east-1a", stackName), }, // resource.TestStep{ // Config: testAccAWSOpsworksStackConfigNoVpcUpdate(stackName), @@ -236,7 +236,7 @@ resource "aws_opsworks_stack" "tf-acc" { region = "us-east-1" service_role_arn = "${aws_iam_role.opsworks_service.arn}" default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}" - default_availability_zone = "us-east-1c" + default_availability_zone = "us-east-1a" default_os = "Amazon Linux 2014.09" default_root_device_type = "ebs" custom_json = "{\"key\": \"value\"}" @@ -317,7 +317,7 @@ resource "aws_opsworks_stack" "tf-acc" { region = "us-east-1" service_role_arn = "${aws_iam_role.opsworks_service.arn}" default_instance_profile_arn = "${aws_iam_instance_profile.opsworks_instance.arn}" - default_availability_zone = "us-east-1c" + default_availability_zone = "us-east-1a" default_os = "Amazon Linux 2014.09" default_root_device_type = "ebs" custom_json = "{\"key\": \"value\"}" diff --git 
a/builtin/providers/aws/resource_aws_opsworks_user_profile.go b/builtin/providers/aws/resource_aws_opsworks_user_profile.go new file mode 100644 index 000000000..fc4a8e246 --- /dev/null +++ b/builtin/providers/aws/resource_aws_opsworks_user_profile.go @@ -0,0 +1,136 @@ +package aws + +import ( + "log" + + "github.com/hashicorp/terraform/helper/schema" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/opsworks" +) + +func resourceAwsOpsworksUserProfile() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsOpsworksUserProfileCreate, + Read: resourceAwsOpsworksUserProfileRead, + Update: resourceAwsOpsworksUserProfileUpdate, + Delete: resourceAwsOpsworksUserProfileDelete, + + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "user_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "allow_self_management": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "ssh_username": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "ssh_public_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + } +} + +func resourceAwsOpsworksUserProfileRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DescribeUserProfilesInput{ + IamUserArns: []*string{ + aws.String(d.Id()), + }, + } + + log.Printf("[DEBUG] Reading OpsWorks user profile: %s", d.Id()) + + resp, err := client.DescribeUserProfiles(req) + if err != nil { + if awserr, ok := err.(awserr.Error); ok { + if awserr.Code() == "ResourceNotFoundException" { + log.Printf("[DEBUG] OpsWorks user profile (%s) not found", d.Id()) + d.SetId("") + return nil + } + } + return err + } + + for _, profile := range resp.UserProfiles { + d.Set("allow_self_management", profile.AllowSelfManagement) + d.Set("user_arn", profile.IamUserArn) + d.Set("ssh_public_key", profile.SshPublicKey) + d.Set("ssh_username", profile.SshUsername) + break + } + + return nil +} + +func resourceAwsOpsworksUserProfileCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.CreateUserProfileInput{ + AllowSelfManagement: aws.Bool(d.Get("allow_self_management").(bool)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + SshPublicKey: aws.String(d.Get("ssh_public_key").(string)), + SshUsername: aws.String(d.Get("ssh_username").(string)), + } + + resp, err := client.CreateUserProfile(req) + if err != nil { + return err + } + + d.SetId(*resp.IamUserArn) + + return resourceAwsOpsworksUserProfileUpdate(d, meta) +} + +func resourceAwsOpsworksUserProfileUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.UpdateUserProfileInput{ + AllowSelfManagement: aws.Bool(d.Get("allow_self_management").(bool)), + IamUserArn: aws.String(d.Get("user_arn").(string)), + SshPublicKey: aws.String(d.Get("ssh_public_key").(string)), + SshUsername: aws.String(d.Get("ssh_username").(string)), + } + + log.Printf("[DEBUG] Updating OpsWorks user profile: %s", req) + + _, err := client.UpdateUserProfile(req) + if err != nil { + return err + } + + return resourceAwsOpsworksUserProfileRead(d, meta) +} + +func resourceAwsOpsworksUserProfileDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*AWSClient).opsworksconn + + req := &opsworks.DeleteUserProfileInput{ + IamUserArn: 
aws.String(d.Id()), + } + + log.Printf("[DEBUG] Deleting OpsWorks user profile: %s", d.Id()) + + _, err := client.DeleteUserProfile(req) + + return err +} diff --git a/builtin/providers/aws/resource_aws_opsworks_user_profile_test.go b/builtin/providers/aws/resource_aws_opsworks_user_profile_test.go new file mode 100644 index 000000000..68c4f30ea --- /dev/null +++ b/builtin/providers/aws/resource_aws_opsworks_user_profile_test.go @@ -0,0 +1,37 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSOpsworksUserProfile(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAwsOpsworksUserProfileCreate, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "aws_opsworks_user_profile.user", "ssh_public_key", "", + ), + resource.TestCheckResourceAttr( + "aws_opsworks_user_profile.user", "ssh_username", "test-user", + ), + resource.TestCheckResourceAttr( + "aws_opsworks_user_profile.user", "allow_self_management", "false", + ), + ), + }, + }, + }) +} + +var testAccAwsOpsworksUserProfileCreate = testAccAWSUserConfig + testAccAwsOpsworksStackConfigNoVpcCreate("tf-ops-acc-user-profile") + ` +resource "aws_opsworks_user_profile" "user" { + user_arn = "${aws_iam_user.user.arn}" + ssh_username = "${aws_iam_user.user.name}" +} +` diff --git a/website/source/docs/providers/aws/r/opsworks_permission.html.markdown b/website/source/docs/providers/aws/r/opsworks_permission.html.markdown new file mode 100644 index 000000000..3739a35be --- /dev/null +++ b/website/source/docs/providers/aws/r/opsworks_permission.html.markdown @@ -0,0 +1,39 @@ +--- +layout: "aws" +page_title: "AWS: aws_opsworks_permission" +sidebar_current: "docs-aws-resource-opsworks-permission" +description: |- + Provides an OpsWorks permission resource. +--- + +# aws\_opsworks\_permission + +Provides an OpsWorks permission resource. + +## Example Usage + +``` +resource "aws_opsworks_permission" "my_stack_permission" { + allow_ssh = true + allow_sudo = true + level = "iam_only" + user_arn = "${aws_iam_user.user.arn}" + stack_id = "${aws_opsworks_stack.stack.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `allow_ssh` - (Optional) Whether the user is allowed to use SSH to communicate with the instance +* `allow_sudo` - (Optional) Whether the user is allowed to use sudo to elevate privileges +* `user_arn` - (Required) The user's IAM ARN to set permissions for +* `level` - (Optional) The user's permission level. Must be one of `deny`, `show`, `deploy`, `manage`, `iam_only` +* `stack_id` - (Required) The stack to set the permissions for + +## Attributes Reference + +The following attributes are exported: + +* `id` - The computed id of the permission. Please note that this is only used internally to identify the permission. This value is not used in AWS.
\ No newline at end of file diff --git a/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown b/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown new file mode 100644 index 000000000..781fae028 --- /dev/null +++ b/website/source/docs/providers/aws/r/opsworks_user_profile.html.markdown @@ -0,0 +1,35 @@ +--- +layout: "aws" +page_title: "AWS: aws_opsworks_user_profile" +sidebar_current: "docs-aws-resource-opsworks-user-profile" +description: |- + Provides an OpsWorks User Profile resource. +--- + +# aws\_opsworks\_user\_profile + +Provides an OpsWorks User Profile resource. + +## Example Usage + +``` +resource "aws_opsworks_user_profile" "my_profile" { + user_arn = "${aws_iam_user.user.arn}" + ssh_username = "my_user" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `user_arn` - (Required) The user's IAM ARN +* `allow_self_management` - (Optional) Whether users can specify their own SSH public key through the My Settings page +* `ssh_username` - (Required) The ssh username, with which this user wants to log in +* `ssh_public_key` - (Optional) The user's public key + +## Attributes Reference + +The following attributes are exported: + +* `id` - Same value as `user_arn` From 7ba439dbf61c2016cc2b0cb22eddcdebbf062c14 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 21 Jul 2016 00:30:43 +0100 Subject: [PATCH 0358/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1092f3989..50d25d909 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -60,6 +60,8 @@ FEATURES: * **New Resource:** `aws_ses_receipt_rule` [GH-5387] * **New Resource:** `aws_ses_receipt_rule_set` [GH-5387] * **New Resource:** `aws_simpledb_domain` [GH-7600] + * **New Resource:** `aws_opsworks_user_profile` [GH-6304] + * **New Resource:** `aws_opsworks_permission` [GH-6304] * **New Resource:** `openstack_blockstorage_volume_v2` [GH-6693] * **New Resource:** `openstack_lb_loadbalancer_v2` [GH-7012] * **New Resource:** `openstack_lb_listener_v2` [GH-7012] From df5d2c9a63116e0cdaa439647a969c44c2e764db Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 20 Jul 2016 18:38:14 -0500 Subject: [PATCH 0359/1238] provider/aws: pull iamconn setup earlier (#7734) Fixes problem introduced in re-arrangement of config --- builtin/providers/aws/config.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 823e9ecd5..a9b49528c 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -193,7 +193,10 @@ func (c *Config) Client() (interface{}, error) { dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) + // These two services need to be set up early so we can check on AccountID + client.iamconn = iam.New(awsIamSess) client.stsconn = sts.New(sess) + err = c.ValidateCredentials(client.stsconn) if err != nil { errs = append(errs, err) @@ -233,7 +236,6 @@ func (c *Config) Client() (interface{}, error) { client.esconn = elasticsearch.New(sess) client.firehoseconn = firehose.New(sess) client.glacierconn = glacier.New(sess) - client.iamconn = iam.New(awsIamSess) client.kinesisconn = kinesis.New(kinesisSess) client.kmsconn = kms.New(sess) client.lambdaconn = lambda.New(sess) From 2e650ce1537ba0a5684a69c1511e4426d4e5693b Mon Sep 17 00:00:00 2001 From: Joe
Topjian Date: Thu, 21 Jul 2016 00:35:49 -0600 Subject: [PATCH 0360/1238] provider/openstack: making import command consistent (#7739) --- .../providers/openstack/r/blockstorage_volume_v1.html.markdown | 2 +- .../providers/openstack/r/blockstorage_volume_v2.html.markdown | 2 +- .../providers/openstack/r/compute_floatingip_v2.html.markdown | 2 +- .../docs/providers/openstack/r/compute_keypair_v2.html.markdown | 2 +- .../providers/openstack/r/compute_secgroup_v2.html.markdown | 2 +- .../providers/openstack/r/compute_servergroup_v2.html.markdown | 2 +- .../docs/providers/openstack/r/fw_firewall_v1.html.markdown | 2 +- .../docs/providers/openstack/r/fw_policy_v1.html.markdown | 2 +- .../source/docs/providers/openstack/r/fw_rule_v1.html.markdown | 2 +- .../docs/providers/openstack/r/lb_member_v1.html.markdown | 2 +- .../docs/providers/openstack/r/lb_monitor_v1.html.markdown | 2 +- .../source/docs/providers/openstack/r/lb_pool_v1.html.markdown | 2 +- .../source/docs/providers/openstack/r/lb_vip_v1.html.markdown | 2 +- .../openstack/r/networking_floatingip_v2.html.markdown | 2 +- .../providers/openstack/r/networking_network_v2.html.markdown | 2 +- .../docs/providers/openstack/r/networking_port_v2.html.markdown | 2 +- .../openstack/r/networking_secgroup_rule_v2.html.markdown | 2 +- .../providers/openstack/r/networking_secgroup_v2.html.markdown | 2 +- .../providers/openstack/r/networking_subnet_v2.html.markdown | 2 +- 19 files changed, 19 insertions(+), 19 deletions(-) diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown index bc6e1cd9a..39f9b0f34 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown @@ -79,5 +79,5 @@ The following attributes are exported: Volumes can be imported using the `id`, e.g. ``` -terraform import openstack_blockstorage_volume_v1.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d +$ terraform import openstack_blockstorage_volume_v1.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d ``` diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown index dee3a4f0a..fd9d4344f 100644 --- a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown @@ -84,5 +84,5 @@ The following attributes are exported: Volumes can be imported using the `id`, e.g. ``` -terraform import openstack_blockstorage_volume_v2.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d +$ terraform import openstack_blockstorage_volume_v2.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d ``` diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown index fc4359b22..cbaee21ce 100644 --- a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown @@ -49,5 +49,5 @@ The following attributes are exported: Floating IPs can be imported using the `id`, e.g. 
``` -terraform import openstack_compute_floatingip_v2.floatip_1 89c60255-9bd6-460c-822a-e2b959ede9d2 +$ terraform import openstack_compute_floatingip_v2.floatip_1 89c60255-9bd6-460c-822a-e2b959ede9d2 ``` diff --git a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown index 991fdfb8f..8852385e7 100644 --- a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown @@ -47,5 +47,5 @@ The following attributes are exported: Keypairs can be imported using the `name`, e.g. ``` -terraform import openstack_compute_keypair_v2.my-keypair test-keypair +$ terraform import openstack_compute_keypair_v2.my-keypair test-keypair ``` diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown index a9c1c8385..35f2a6d48 100644 --- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown @@ -120,5 +120,5 @@ resource "openstack_compute_instance_v2" "test-server" { Security Groups can be imported using the `id`, e.g. ``` -terraform import openstack_compute_secgroup_v2.my_secgroup 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf +$ terraform import openstack_compute_secgroup_v2.my_secgroup 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf ``` diff --git a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown index f3af0a5f6..cd54a66e9 100644 --- a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown @@ -57,5 +57,5 @@ The following attributes are exported: Server Groups can be imported using the `id`, e.g. ``` -terraform import openstack_compute_servergroup_v2.test-sg 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf +$ terraform import openstack_compute_servergroup_v2.test-sg 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf ``` diff --git a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown index d3a1e9739..48fbc6b60 100644 --- a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown @@ -85,5 +85,5 @@ The following attributes are exported: Firewalls can be imported using the `id`, e.g. ``` -terraform import openstack_fw_firewall_v1.firewall_1 c9e39fb2-ce20-46c8-a964-25f3898c7a97 +$ terraform import openstack_fw_firewall_v1.firewall_1 c9e39fb2-ce20-46c8-a964-25f3898c7a97 ``` diff --git a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown index e8be6f4c2..1e2a394cd 100644 --- a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown @@ -83,5 +83,5 @@ The following attributes are exported: Firewall Policies can be imported using the `id`, e.g. 
``` -terraform import openstack_fw_policy_v1.policy_1 07f422e6-c596-474b-8b94-fe2c12506ce0 +$ terraform import openstack_fw_policy_v1.policy_1 07f422e6-c596-474b-8b94-fe2c12506ce0 ``` diff --git a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown index a6c55f96b..74598086f 100644 --- a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown @@ -94,5 +94,5 @@ The following attributes are exported: Firewall Rules can be imported using the `id`, e.g. ``` -terraform import openstack_fw_rule_v1.rule_1 8dbc0c28-e49c-463f-b712-5c5d1bbac327 +$ terraform import openstack_fw_rule_v1.rule_1 8dbc0c28-e49c-463f-b712-5c5d1bbac327 ``` diff --git a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown index d11366d6a..ef01d712d 100644 --- a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown @@ -62,5 +62,5 @@ The following attributes are exported: Load Balancer Members can be imported using the `id`, e.g. ``` -terraform import openstack_lb_member_v1.member_1 a7498676-4fe4-4243-a864-2eaaf18c73df +$ terraform import openstack_lb_member_v1.member_1 a7498676-4fe4-4243-a864-2eaaf18c73df ``` diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown index e210ad3d9..f43c9934a 100644 --- a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown @@ -86,5 +86,5 @@ The following attributes are exported: Load Balancer Members can be imported using the `id`, e.g. ``` -terraform import openstack_lb_monitor_v1.monitor_1 119d7530-72e9-449a-aa97-124a5ef1992c +$ terraform import openstack_lb_monitor_v1.monitor_1 119d7530-72e9-449a-aa97-124a5ef1992c ``` diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown index cab6e9d5b..62c5e2d5d 100644 --- a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown @@ -184,5 +184,5 @@ The `member` block is deprecated in favor of the `openstack_lb_member_v1` resour Load Balancer Pools can be imported using the `id`, e.g. ``` -terraform import openstack_lb_pool_v1.pool_1 b255e6ba-02ad-43e6-8951-3428ca26b713 +$ terraform import openstack_lb_pool_v1.pool_1 b255e6ba-02ad-43e6-8951-3428ca26b713 ``` diff --git a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown index 8e1d64561..ed4de3b87 100644 --- a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown +++ b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown @@ -104,5 +104,5 @@ The following attributes are exported: Load Balancer VIPs can be imported using the `id`, e.g. 
``` -terraform import openstack_lb_vip_v1.vip_1 50e16b26-89c1-475e-a492-76167182511e +$ terraform import openstack_lb_vip_v1.vip_1 50e16b26-89c1-475e-a492-76167182511e ``` diff --git a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown index 6106f2d19..e96a503ee 100644 --- a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown @@ -61,5 +61,5 @@ The following attributes are exported: Floating IPs can be imported using the `id`, e.g. ``` -terraform import openstack_networking_floatingip_v2.floatip_1 2c7f39f3-702b-48d1-940c-b50384177ee1 +$ terraform import openstack_networking_floatingip_v2.floatip_1 2c7f39f3-702b-48d1-940c-b50384177ee1 ``` diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index 5ce7dcec5..0eff316e8 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -97,5 +97,5 @@ The following attributes are exported: Networks can be imported using the `id`, e.g. ``` -terraform import openstack_networking_network_v2.network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9 +$ terraform import openstack_networking_network_v2.network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9 ``` diff --git a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown index 4b8134b38..df8c9d996 100644 --- a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown @@ -91,7 +91,7 @@ The following attributes are exported: Ports can be imported using the `id`, e.g. ``` -terraform import openstack_networking_port_v2.port_1 eae26a3e-1c33-4cc1-9c31-0cd729c438a1 +$ terraform import openstack_networking_port_v2.port_1 eae26a3e-1c33-4cc1-9c31-0cd729c438a1 ``` ## Notes diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown index b0c4e7b3f..36d589ac4 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown @@ -93,5 +93,5 @@ The following attributes are exported: Security Group Rules can be imported using the `id`, e.g. ``` -terraform import openstack_networking_secgroup_rule_v2.secgroup_rule_1 aeb68ee3-6e9d-4256-955c-9584a6212745 +$ terraform import openstack_networking_secgroup_rule_v2.secgroup_rule_1 aeb68ee3-6e9d-4256-955c-9584a6212745 ``` diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown index b964c2934..c17c5c47b 100644 --- a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown @@ -54,5 +54,5 @@ The following attributes are exported: Security Groups can be imported using the `id`, e.g. 
``` -terraform import openstack_networking_secgroup_v2.secgroup_1 38809219-5e8a-4852-9139-6f461c90e8bc +$ terraform import openstack_networking_secgroup_v2.secgroup_1 38809219-5e8a-4852-9139-6f461c90e8bc ``` diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown index a990d2e96..072c49b16 100644 --- a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown @@ -106,5 +106,5 @@ The following attributes are exported: Subnets can be imported using the `id`, e.g. ``` -terraform import openstack_networking_subnet_v2.subnet_1 da4faf16-5546-41e4-8330-4d0002b74048 +$ terraform import openstack_networking_subnet_v2.subnet_1 da4faf16-5546-41e4-8330-4d0002b74048 ``` From 399f1c20e0fccaec8c40ccb4ba1e6fca7c6e29fd Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 21 Jul 2016 12:41:01 +0100 Subject: [PATCH 0361/1238] provider/aws: Rename the Basic Import test for CloudFront distributions --- .../providers/aws/import_aws_cloudfront_distribution_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/import_aws_cloudfront_distribution_test.go b/builtin/providers/aws/import_aws_cloudfront_distribution_test.go index 9c2bbed63..237a26d4b 100644 --- a/builtin/providers/aws/import_aws_cloudfront_distribution_test.go +++ b/builtin/providers/aws/import_aws_cloudfront_distribution_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" ) -func TestAccCloudFrontDistribution_importBasic(t *testing.T) { +func TestAccAWSCloudFrontDistribution_importBasic(t *testing.T) { resourceName := "aws_cloudfront_distribution.s3_distribution" resource.Test(t, resource.TestCase{ From ddc0f4cdb0c5b5fb848ac4856e9bcf32cc55ec0f Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Thu, 21 Jul 2016 15:23:58 +0200 Subject: [PATCH 0362/1238] Fix dynamically determining if `ForceNew = true` (#7745) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same instance of the resources’ `schema.Resource` is used for all resources of the same type. So we need to set either `true` or `false` for every resource to make sure we get the correct value. 
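As a minimal, self-contained sketch of that behaviour (the `schemaEntry`, `shared`, and `readResource` names below are illustrative stand-ins, not code from this change): a flag that is only ever set, and never reset, on the shared schema entry leaks from one resource of the type to the next.

```
package main

import "fmt"

// schemaEntry stands in for the shared *schema.Schema entry of a resource type.
type schemaEntry struct{ ForceNew bool }

// shared mimics the single entry registered for the resource type; every
// resource of that type in a configuration reads and writes this same value.
var shared = &schemaEntry{}

// readResource mimics the per-resource code: it must write ForceNew in both
// branches, because the previous resource's value would otherwise persist.
func readResource(aclID string) {
	if aclID == "none" {
		shared.ForceNew = true
	} else {
		shared.ForceNew = false // explicit reset
	}
}

func main() {
	readResource("none")         // first resource sets the flag
	readResource("acl-123")      // second resource is correct only because of the reset
	fmt.Println(shared.ForceNew) // false
}
```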
--- builtin/providers/cloudstack/resource_cloudstack_network.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/builtin/providers/cloudstack/resource_cloudstack_network.go b/builtin/providers/cloudstack/resource_cloudstack_network.go index 1fccdfd55..2b58b5bef 100644 --- a/builtin/providers/cloudstack/resource_cloudstack_network.go +++ b/builtin/providers/cloudstack/resource_cloudstack_network.go @@ -25,6 +25,8 @@ func resourceCloudStackNetwork() *schema.Resource { if value == none { aclidSchema.ForceNew = true + } else { + aclidSchema.ForceNew = false } return value From 885935962c52933ce49573bb5181b2a52e57c009 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 21 Jul 2016 10:56:32 -0400 Subject: [PATCH 0363/1238] Add a terraform version header to all atlas calls Using the DefaultHeader added to the atlas.Client --- command/push.go | 3 +++ terraform/version.go | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/command/push.go b/command/push.go index 67cac6740..1abe13e0b 100644 --- a/command/push.go +++ b/command/push.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/atlas-go/archive" "github.com/hashicorp/atlas-go/v1" + "github.com/hashicorp/terraform/terraform" ) type PushCommand struct { @@ -126,6 +127,8 @@ func (c *PushCommand) Run(args []string) int { } } + client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) + if atlasToken != "" { client.Token = atlasToken } diff --git a/terraform/version.go b/terraform/version.go index e781d9c25..7462a3c67 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -16,3 +16,7 @@ const VersionPrerelease = "dev" // benefit of verifying during tests and init time that our version is a // proper semantic version, which should always be the case. var SemVersion = version.Must(version.NewVersion(Version)) + +// VersionHeader is the header name used to send the current terraform version +// in http requests. +const VersionHeader = "Terraform-Version" From bd99c7a9040d27534de61db91f12615382a85b6b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 21 Jul 2016 11:04:40 -0400 Subject: [PATCH 0364/1238] Add the VersionHeader to the atlas provider too --- builtin/providers/atlas/provider.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/atlas/provider.go b/builtin/providers/atlas/provider.go index 3d14edca8..e7034a7cd 100644 --- a/builtin/providers/atlas/provider.go +++ b/builtin/providers/atlas/provider.go @@ -52,6 +52,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { return nil, err } } + client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) client.Token = d.Get("token").(string) return client, nil From 69041f9c96ae372a24965781ba8c4e7750be3531 Mon Sep 17 00:00:00 2001 From: gambrose Date: Thu, 21 Jul 2016 17:51:18 +0100 Subject: [PATCH 0365/1238] Removed azurerm_network_interface from azurerm_virtual_machine_scale_set example (#7750) Azure scale sets encapsulates the creation of network interfaces so it is not valid in this example. 
--- .../r/virtual_machine_scale_sets.html.markdown | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown index 0704de4e0..5588b0864 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown @@ -32,18 +32,6 @@ resource "azurerm_subnet" "test" { address_prefix = "10.0.2.0/24" } -resource "azurerm_network_interface" "test" { - name = "acctni" - location = "West US" - resource_group_name = "${azurerm_resource_group.test.name}" - - ip_configuration { - name = "testconfiguration1" - subnet_id = "${azurerm_subnet.test.id}" - private_ip_address_allocation = "dynamic" - } -} - resource "azurerm_storage_account" "test" { name = "accsa" resource_group_name = "${azurerm_resource_group.test.name}" From 0c86217db7c922aad3e56b1de3ef546d1ca1b1e4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 21 Jul 2016 18:21:03 +0100 Subject: [PATCH 0366/1238] docs/azurerm: prefixes removed from `azurerm_virtual_machine_scale_set` (#7752) documentation example --- .../azurerm/r/virtual_machine_scale_sets.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown index 5588b0864..f9d0f231e 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine_scale_sets.html.markdown @@ -62,13 +62,13 @@ resource "azurerm_virtual_machine_scale_set" "test" { capacity = 2 } - virtual_machine_os_profile { + os_profile { computer_name_prefix = "testvm" admin_username = "myadmin" admin_password = "Passwword1234" } - virtual_machine_os_profile_linux_config { + os_profile_linux_config { disable_password_authentication = true ssh_keys { path = "/home/myadmin/.ssh/authorized_keys" @@ -76,7 +76,7 @@ resource "azurerm_virtual_machine_scale_set" "test" { } } - virtual_machine_network_profile { + network_profile { name = "TestNetworkProfile" primary = true ip_configuration { @@ -85,14 +85,14 @@ resource "azurerm_virtual_machine_scale_set" "test" { } } - virtual_machine_storage_profile_os_disk { + storage_profile_os_disk { name = "osDiskProfile" caching = "ReadWrite" create_option = "FromImage" vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"] } - virtual_machine_storage_profile_image_reference { + storage_profile_image_reference { publisher = "Canonical" offer = "UbuntuServer" sku = "14.04.2-LTS" From 97c52830dbff221f412af7104d11e4d0007f8370 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Thu, 21 Jul 2016 20:46:12 +0200 Subject: [PATCH 0367/1238] fix(doc): remove copy pasta --- .../providers/aws/d/ecs_container_definition.html.markdown | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/d/ecs_container_definition.html.markdown b/website/source/docs/providers/aws/d/ecs_container_definition.html.markdown index ac86593f7..6badc81dd 100644 --- a/website/source/docs/providers/aws/d/ecs_container_definition.html.markdown +++ b/website/source/docs/providers/aws/d/ecs_container_definition.html.markdown @@ -8,9 +8,8 @@ description: |- # 
aws\_ecs\_container\_definition -The Availability Zones data source allows access to the list of AWS -Availability Zones which can be accessed by an AWS account within the region -configured in the provider. +The ECS container definition data source allows access to details of +a specific container within an AWS ECS service. ## Example Usage From 4969e64132fffdb5a333413daa8e3f3c20082ff8 Mon Sep 17 00:00:00 2001 From: Jeremy Yoder Date: Thu, 21 Jul 2016 14:55:10 -0400 Subject: [PATCH 0368/1238] Fix endpoint documentation for aws_rds_cluster (#7755) --- .../providers/aws/r/rds_cluster.html.markdown | 17 ++++++++--------- .../aws/r/rds_cluster_instance.html.markdown | 6 +++--- 2 files changed, 11 insertions(+), 12 deletions(-) diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index 7229a926c..90e373eac 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -19,11 +19,11 @@ Changes to a RDS Cluster can occur when you manually change a parameter, such as `port`, and are reflected in the next maintenance window. Because of this, Terraform may report a difference in it's planning phase because a modification has not yet taken place. You can use the -`apply_immediately` flag to instruct the service to apply the change immediately -(see documentation below). +`apply_immediately` flag to instruct the service to apply the change immediately +(see documentation below). -~> **Note:** using `apply_immediately` can result in a -brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4] +~> **Note:** using `apply_immediately` can result in a +brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4] for more information. ## Example Usage @@ -66,7 +66,7 @@ string. instances in the DB cluster can be created in * `backup_retention_period` - (Optional) The days to retain backups for. Default 1 -* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. +* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Default: A 30-minute window selected at random from an 8-hour block of time per region. e.g. 04:00-09:00 * `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g. wed:04:00-wed:04:30 * `port` - (Optional) The port on which the DB accepts connections @@ -88,13 +88,12 @@ The following attributes are exported: * `id` - The RDS Cluster Identifier * `cluster_identifier` - The RDS Cluster Identifier * `cluster_members` – List of RDS Instances that are a part of this cluster -* `address` - The address of the RDS instance. 
* `allocated_storage` - The amount of allocated storage * `availability_zones` - The availability zone of the instance * `backup_retention_period` - The backup retention period * `preferred_backup_window` - The backup window * `preferred_maintenance_window` - The maintenance window -* `endpoint` - The primary, writeable connection endpoint +* `endpoint` - The DNS address of the RDS instance * `engine` - The database engine * `engine_version` - The database engine version * `maintenance_window` - The instance maintenance window @@ -113,8 +112,8 @@ The following attributes are exported: ## Import -RDS Clusters can be imported using the `cluster_identifier`, e.g. +RDS Clusters can be imported using the `cluster_identifier`, e.g. ``` $ terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster -``` \ No newline at end of file +``` diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 804a11458..53df2a3c1 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -80,7 +80,7 @@ The following attributes are exported: this instance is a read replica * `allocated_storage` - The amount of allocated storage * `availability_zones` - The availability zone of the instance -* `endpoint` - The IP address for this instance. May not be writable +* `endpoint` - The DNS address for this instance. May not be writable * `engine` - The database engine * `engine_version` - The database engine version * `database_name` - The database name @@ -95,8 +95,8 @@ this instance is a read replica ## Import -Redshift Cluster Instances can be imported using the `identifier`, e.g. +Redshift Cluster Instances can be imported using the `identifier`, e.g. 
``` $ terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 -``` \ No newline at end of file +``` From 9d88ad1d086f2b3e77b8e8ac7d6f12725caf6a72 Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 21 Jul 2016 14:10:33 -0500 Subject: [PATCH 0369/1238] provider/fastly: Update go-fastly SDK (#7747) * provider/fastly: Update go-fastly dependency * update test to check regression --- .../fastly/resource_fastly_service_v1.go | 2 +- ...resource_fastly_service_v1_headers_test.go | 9 +- vendor/github.com/ajg/form/.travis.yml | 24 ---- vendor/github.com/ajg/form/README.md | 39 ++++++- vendor/github.com/ajg/form/TODO.md | 5 + vendor/github.com/ajg/form/decode.go | 106 ++++++++++++------ vendor/github.com/ajg/form/encode.go | 42 ++++++- vendor/github.com/ajg/form/form.go | 3 + vendor/github.com/ajg/form/node.go | 46 ++++---- vendor/github.com/ajg/form/pre-commit.sh | 0 .../github.com/sethvargo/go-fastly/.gitignore | 28 ----- .../sethvargo/go-fastly/.travis.yml | 19 ---- .../github.com/sethvargo/go-fastly/Makefile | 43 +++++-- .../sethvargo/go-fastly/cache_setting.go | 2 +- .../github.com/sethvargo/go-fastly/client.go | 15 +-- .../github.com/sethvargo/go-fastly/errors.go | 4 +- .../github.com/sethvargo/go-fastly/fastly.go | 4 +- .../github.com/sethvargo/go-fastly/header.go | 4 +- vendor/github.com/sethvargo/go-fastly/s3.go | 82 ++++++++------ .../github.com/sethvargo/go-fastly/version.go | 16 +-- vendor/vendor.json | 8 +- 21 files changed, 291 insertions(+), 210 deletions(-) delete mode 100644 vendor/github.com/ajg/form/.travis.yml create mode 100644 vendor/github.com/ajg/form/TODO.md mode change 100644 => 100755 vendor/github.com/ajg/form/pre-commit.sh delete mode 100644 vendor/github.com/sethvargo/go-fastly/.gitignore delete mode 100644 vendor/github.com/sethvargo/go-fastly/.travis.yml diff --git a/builtin/providers/fastly/resource_fastly_service_v1.go b/builtin/providers/fastly/resource_fastly_service_v1.go index e61802c55..348170a0e 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1.go +++ b/builtin/providers/fastly/resource_fastly_service_v1.go @@ -1566,7 +1566,7 @@ func buildHeader(headerMap interface{}) (*gofastly.CreateHeaderInput, error) { df := headerMap.(map[string]interface{}) opts := gofastly.CreateHeaderInput{ Name: df["name"].(string), - IgnoreIfSet: df["ignore_if_set"].(bool), + IgnoreIfSet: gofastly.Compatibool(df["ignore_if_set"].(bool)), Destination: df["destination"].(string), Priority: uint(df["priority"].(int)), Source: df["source"].(string), diff --git a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go b/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go index 306de61f4..7c59470e8 100644 --- a/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go +++ b/builtin/providers/fastly/resource_fastly_service_v1_headers_test.go @@ -181,10 +181,11 @@ resource "fastly_service_v1" "foo" { } header { - destination = "http.Server" - type = "cache" - action = "delete" - name = "remove s3 server" + destination = "http.Server" + type = "cache" + action = "delete" + name = "remove s3 server" + ignore_if_set = "true" } force_destroy = true diff --git a/vendor/github.com/ajg/form/.travis.yml b/vendor/github.com/ajg/form/.travis.yml deleted file mode 100644 index b257361d8..000000000 --- a/vendor/github.com/ajg/form/.travis.yml +++ /dev/null @@ -1,24 +0,0 @@ -## Copyright 2014 Alvaro J. Genial. All rights reserved. 
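// d and e hold the delimiter and escape runes used for composite keys (set via DelimitWith and EscapeWith; the package defaults are '.' and '\\'), while ignoreUnknown and ignoreCase carry the options set via IgnoreUnknownKeys and IgnoreCase.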
-## Use of this source code is governed by a BSD-style -## license that can be found in the LICENSE file. - -language: go - -go: - - tip - - 1.3 - # - 1.2 - # Note: 1.2 is disabled because it seems to require that cover - # be installed from code.google.com/p/go.tools/cmd/cover - -before_install: - - go get -v golang.org/x/tools/cmd/cover - - go get -v golang.org/x/tools/cmd/vet - - go get -v github.com/golang/lint/golint - - export PATH=$PATH:/home/travis/gopath/bin - -script: - - go build -v ./... - - go test -v -cover ./... - - go vet ./... - - golint . diff --git a/vendor/github.com/ajg/form/README.md b/vendor/github.com/ajg/form/README.md index de3ab635c..7117f4812 100644 --- a/vendor/github.com/ajg/form/README.md +++ b/vendor/github.com/ajg/form/README.md @@ -171,10 +171,47 @@ Now any value with type `Binary` will automatically be encoded using the [URL](h Keys ---- -In theory any value can be a key as long as it has a string representation. However, periods have special meaning to `form`, and thus, under the hood (i.e. in encoded form) they are transparently escaped using a preceding backslash (`\`). Backslashes within keys, themselves, are also escaped in this manner (e.g. as `\\`) in order to permit representing `\.` itself (as `\\\.`). +In theory any value can be a key as long as it has a string representation. However, by default, periods have special meaning to `form`, and thus, under the hood (i.e. in encoded form) they are transparently escaped using a preceding backslash (`\`). Backslashes within keys, themselves, are also escaped in this manner (e.g. as `\\`) in order to permit representing `\.` itself (as `\\\.`). (Note: it is normally unnecessary to deal with this issue unless keys are being constructed manually—e.g. literally embedded in HTML or in a URI.) +The default delimiter and escape characters used for encoding and decoding composite keys can be changed using the `DelimitWith` and `EscapeWith` setter methods of `Encoder` and `Decoder`, respectively. For example... + +```go +package main + +import ( + "os" + + "github.com/ajg/form" +) + +func main() { + type B struct { + Qux string `form:"qux"` + } + type A struct { + FooBar B `form:"foo.bar"` + } + a := A{FooBar: B{"XYZ"}} + os.Stdout.WriteString("Default: ") + form.NewEncoder(os.Stdout).Encode(a) + os.Stdout.WriteString("\nCustom: ") + form.NewEncoder(os.Stdout).DelimitWith('/').Encode(a) + os.Stdout.WriteString("\n") +} + +``` + +...will produce... + +``` +Default: foo%5C.bar.qux=XYZ +Custom: foo.bar%2Fqux=XYZ +``` + +(`%5C` and `%2F` represent `\` and `/`, respectively.) + Limitations ----------- diff --git a/vendor/github.com/ajg/form/TODO.md b/vendor/github.com/ajg/form/TODO.md new file mode 100644 index 000000000..672fd4657 --- /dev/null +++ b/vendor/github.com/ajg/form/TODO.md @@ -0,0 +1,5 @@ +TODO +==== + + - Document IgnoreCase and IgnoreUnknownKeys in README. + - Fix want/have newlines in tests. diff --git a/vendor/github.com/ajg/form/decode.go b/vendor/github.com/ajg/form/decode.go index d03b2082c..3346fffe5 100644 --- a/vendor/github.com/ajg/form/decode.go +++ b/vendor/github.com/ajg/form/decode.go @@ -16,12 +16,28 @@ import ( // NewDecoder returns a new form decoder. func NewDecoder(r io.Reader) *decoder { - return &decoder{r} + return &decoder{r, defaultDelimiter, defaultEscape, false, false} } // decoder decodes data from a form (application/x-www-form-urlencoded). 
type decoder struct { - r io.Reader + r io.Reader + d rune + e rune + ignoreUnknown bool + ignoreCase bool +} + +// DelimitWith sets r as the delimiter used for composite keys by decoder d and returns the latter; it is '.' by default. +func (d *decoder) DelimitWith(r rune) *decoder { + d.d = r + return d +} + +// EscapeWith sets r as the escape used for delimiters (and to escape itself) by decoder d and returns the latter; it is '\\' by default. +func (d *decoder) EscapeWith(r rune) *decoder { + d.e = r + return d } // Decode reads in and decodes form-encoded data into dst. @@ -35,26 +51,48 @@ func (d decoder) Decode(dst interface{}) error { return err } v := reflect.ValueOf(dst) - return decodeNode(v, parseValues(vs, canIndexOrdinally(v))) + return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v))) +} + +// IgnoreUnknownKeys if set to true it will make the decoder ignore values +// that are not found in the destination object instead of returning an error. +func (d *decoder) IgnoreUnknownKeys(ignoreUnknown bool) { + d.ignoreUnknown = ignoreUnknown +} + +// IgnoreCase if set to true it will make the decoder try to set values in the +// destination object even if the case does not match. +func (d *decoder) IgnoreCase(ignoreCase bool) { + d.ignoreCase = ignoreCase } // DecodeString decodes src into dst. -func DecodeString(dst interface{}, src string) error { +func (d decoder) DecodeString(dst interface{}, src string) error { vs, err := url.ParseQuery(src) if err != nil { return err } v := reflect.ValueOf(dst) - return decodeNode(v, parseValues(vs, canIndexOrdinally(v))) + return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v))) +} + +// DecodeValues decodes vs into dst. +func (d decoder) DecodeValues(dst interface{}, vs url.Values) error { + v := reflect.ValueOf(dst) + return d.decodeNode(v, parseValues(d.d, d.e, vs, canIndexOrdinally(v))) +} + +// DecodeString decodes src into dst. +func DecodeString(dst interface{}, src string) error { + return NewDecoder(nil).DecodeString(dst, src) } // DecodeValues decodes vs into dst. 
func DecodeValues(dst interface{}, vs url.Values) error { - v := reflect.ValueOf(dst) - return decodeNode(v, parseValues(vs, canIndexOrdinally(v))) + return NewDecoder(nil).DecodeValues(dst, vs) } -func decodeNode(v reflect.Value, n node) (err error) { +func (d decoder) decodeNode(v reflect.Value, n node) (err error) { defer func() { if e := recover(); e != nil { err = fmt.Errorf("%v", e) @@ -64,11 +102,11 @@ func decodeNode(v reflect.Value, n node) (err error) { if v.Kind() == reflect.Slice { return fmt.Errorf("could not decode directly into slice; use pointer to slice") } - decodeValue(v, n) + d.decodeValue(v, n) return nil } -func decodeValue(v reflect.Value, x interface{}) { +func (d decoder) decodeValue(v reflect.Value, x interface{}) { t := v.Type() k := v.Kind() @@ -84,11 +122,11 @@ func decodeValue(v reflect.Value, x interface{}) { switch k { case reflect.Ptr: - decodeValue(v.Elem(), x) + d.decodeValue(v.Elem(), x) return case reflect.Interface: if !v.IsNil() { - decodeValue(v.Elem(), x) + d.decodeValue(v.Elem(), x) return } else if empty { @@ -106,48 +144,50 @@ func decodeValue(v reflect.Value, x interface{}) { switch k { case reflect.Struct: if t.ConvertibleTo(timeType) { - decodeTime(v, x) + d.decodeTime(v, x) } else if t.ConvertibleTo(urlType) { - decodeURL(v, x) + d.decodeURL(v, x) } else { - decodeStruct(v, x) + d.decodeStruct(v, x) } case reflect.Slice: - decodeSlice(v, x) + d.decodeSlice(v, x) case reflect.Array: - decodeArray(v, x) + d.decodeArray(v, x) case reflect.Map: - decodeMap(v, x) + d.decodeMap(v, x) case reflect.Invalid, reflect.Uintptr, reflect.UnsafePointer, reflect.Chan, reflect.Func: panic(t.String() + " has unsupported kind " + k.String()) default: - decodeBasic(v, x) + d.decodeBasic(v, x) } } -func decodeStruct(v reflect.Value, x interface{}) { +func (d decoder) decodeStruct(v reflect.Value, x interface{}) { t := v.Type() for k, c := range getNode(x) { - if f, ok := findField(v, k); !ok && k == "" { + if f, ok := findField(v, k, d.ignoreCase); !ok && k == "" { panic(getString(x) + " cannot be decoded as " + t.String()) } else if !ok { - panic(k + " doesn't exist in " + t.String()) + if !d.ignoreUnknown { + panic(k + " doesn't exist in " + t.String()) + } } else if !f.CanSet() { panic(k + " cannot be set in " + t.String()) } else { - decodeValue(f, c) + d.decodeValue(f, c) } } } -func decodeMap(v reflect.Value, x interface{}) { +func (d decoder) decodeMap(v reflect.Value, x interface{}) { t := v.Type() if v.IsNil() { v.Set(reflect.MakeMap(t)) } for k, c := range getNode(x) { i := reflect.New(t.Key()).Elem() - decodeValue(i, k) + d.decodeValue(i, k) w := v.MapIndex(i) if w.IsValid() { // We have an actual element value to decode into. @@ -171,12 +211,12 @@ func decodeMap(v reflect.Value, x interface{}) { } } - decodeValue(w, c) + d.decodeValue(w, c) v.SetMapIndex(i, w) } } -func decodeArray(v reflect.Value, x interface{}) { +func (d decoder) decodeArray(v reflect.Value, x interface{}) { t := v.Type() for k, c := range getNode(x) { i, err := strconv.Atoi(k) @@ -186,11 +226,11 @@ func decodeArray(v reflect.Value, x interface{}) { if l := v.Len(); i >= l { panic("index is above array size") } - decodeValue(v.Index(i), c) + d.decodeValue(v.Index(i), c) } } -func decodeSlice(v reflect.Value, x interface{}) { +func (d decoder) decodeSlice(v reflect.Value, x interface{}) { t := v.Type() if t.Elem().Kind() == reflect.Uint8 { // Allow, but don't require, byte slices to be encoded as a single string. 
@@ -221,11 +261,11 @@ func decodeSlice(v reflect.Value, x interface{}) { delta := i - l + 1 v.Set(reflect.AppendSlice(v, reflect.MakeSlice(t, delta, delta))) } - decodeValue(v.Index(i), c) + d.decodeValue(v.Index(i), c) } } -func decodeBasic(v reflect.Value, x interface{}) { +func (d decoder) decodeBasic(v reflect.Value, x interface{}) { t := v.Type() switch k, s := t.Kind(), getString(x); k { case reflect.Bool: @@ -276,7 +316,7 @@ func decodeBasic(v reflect.Value, x interface{}) { } } -func decodeTime(v reflect.Value, x interface{}) { +func (d decoder) decodeTime(v reflect.Value, x interface{}) { t := v.Type() s := getString(x) // TODO: Find a more efficient way to do this. @@ -289,7 +329,7 @@ func decodeTime(v reflect.Value, x interface{}) { panic("cannot decode string `" + s + "` as " + t.String()) } -func decodeURL(v reflect.Value, x interface{}) { +func (d decoder) decodeURL(v reflect.Value, x interface{}) { t := v.Type() s := getString(x) if u, err := url.Parse(s); err == nil { diff --git a/vendor/github.com/ajg/form/encode.go b/vendor/github.com/ajg/form/encode.go index 4c6f6c869..3e824c6c6 100644 --- a/vendor/github.com/ajg/form/encode.go +++ b/vendor/github.com/ajg/form/encode.go @@ -18,12 +18,26 @@ import ( // NewEncoder returns a new form encoder. func NewEncoder(w io.Writer) *encoder { - return &encoder{w} + return &encoder{w, defaultDelimiter, defaultEscape} } // encoder provides a way to encode to a Writer. type encoder struct { w io.Writer + d rune + e rune +} + +// DelimitWith sets r as the delimiter used for composite keys by encoder e and returns the latter; it is '.' by default. +func (e *encoder) DelimitWith(r rune) *encoder { + e.d = r + return e +} + +// EscapeWith sets r as the escape used for delimiters (and to escape itself) by encoder e and returns the latter; it is '\\' by default. +func (e *encoder) EscapeWith(r rune) *encoder { + e.e = r + return e } // Encode encodes dst as form and writes it out using the encoder's Writer. @@ -33,7 +47,7 @@ func (e encoder) Encode(dst interface{}) error { if err != nil { return err } - s := n.Values().Encode() + s := n.values(e.d, e.e).Encode() l, err := io.WriteString(e.w, s) switch { case err != nil: @@ -51,7 +65,8 @@ func EncodeToString(dst interface{}) (string, error) { if err != nil { return "", err } - return n.Values().Encode(), nil + vs := n.values(defaultDelimiter, defaultEscape) + return vs.Encode(), nil } // EncodeToValues encodes dst as a form and returns it as Values. @@ -61,7 +76,8 @@ func EncodeToValues(dst interface{}) (url.Values, error) { if err != nil { return nil, err } - return n.Values(), nil + vs := n.values(defaultDelimiter, defaultEscape) + return vs, nil } func encodeToNode(v reflect.Value) (n node, err error) { @@ -258,9 +274,16 @@ func fieldInfo(f reflect.StructField) (k string, oe bool) { return k, oe } -func findField(v reflect.Value, n string) (reflect.Value, bool) { +func findField(v reflect.Value, n string, ignoreCase bool) (reflect.Value, bool) { t := v.Type() l := v.NumField() + + var lowerN string + caseInsensitiveMatch := -1 + if ignoreCase { + lowerN = strings.ToLower(n) + } + // First try named fields. for i := 0; i < l; i++ { f := t.Field(i) @@ -269,9 +292,16 @@ func findField(v reflect.Value, n string) (reflect.Value, bool) { continue } else if n == k { return v.Field(i), true + } else if ignoreCase && lowerN == strings.ToLower(k) { + caseInsensitiveMatch = i } } + // If no exact match was found try case insensitive match. 
+ if caseInsensitiveMatch != -1 { + return v.Field(caseInsensitiveMatch), true + } + // Then try anonymous (embedded) fields. for i := 0; i < l; i++ { f := t.Field(i) @@ -289,7 +319,7 @@ func findField(v reflect.Value, n string) (reflect.Value, bool) { if fk != reflect.Struct { continue } - if ev, ok := findField(fv, n); ok { + if ev, ok := findField(fv, n, ignoreCase); ok { return ev, true } } diff --git a/vendor/github.com/ajg/form/form.go b/vendor/github.com/ajg/form/form.go index 7c74f3d57..4052369cf 100644 --- a/vendor/github.com/ajg/form/form.go +++ b/vendor/github.com/ajg/form/form.go @@ -8,4 +8,7 @@ package form const ( implicitKey = "_" omittedKey = "-" + + defaultDelimiter = '.' + defaultEscape = '\\' ) diff --git a/vendor/github.com/ajg/form/node.go b/vendor/github.com/ajg/form/node.go index e4a04e5bd..567aaafde 100644 --- a/vendor/github.com/ajg/form/node.go +++ b/vendor/github.com/ajg/form/node.go @@ -12,19 +12,19 @@ import ( type node map[string]interface{} -func (n node) Values() url.Values { +func (n node) values(d, e rune) url.Values { vs := url.Values{} - n.merge("", &vs) + n.merge(d, e, "", &vs) return vs } -func (n node) merge(p string, vs *url.Values) { +func (n node) merge(d, e rune, p string, vs *url.Values) { for k, x := range n { switch y := x.(type) { case string: - vs.Add(p+escape(k), y) + vs.Add(p+escape(d, e, k), y) case node: - y.merge(p+escape(k)+".", vs) + y.merge(d, e, p+escape(d, e, k)+string(d), vs) default: panic("value is neither string nor node") } @@ -32,7 +32,7 @@ func (n node) merge(p string, vs *url.Values) { } // TODO: Add tests for implicit indexing. -func parseValues(vs url.Values, canIndexFirstLevelOrdinally bool) node { +func parseValues(d, e rune, vs url.Values, canIndexFirstLevelOrdinally bool) node { // NOTE: Because of the flattening of potentially multiple strings to one key, implicit indexing works: // i. At the first level; e.g. Foo.Bar=A&Foo.Bar=B becomes 0.Foo.Bar=A&1.Foo.Bar=B // ii. At the last level; e.g. Foo.Bar._=A&Foo.Bar._=B becomes Foo.Bar.0=A&Foo.Bar.1=B @@ -41,11 +41,11 @@ func parseValues(vs url.Values, canIndexFirstLevelOrdinally bool) node { m := map[string]string{} for k, ss := range vs { - indexLastLevelOrdinally := strings.HasSuffix(k, "."+implicitKey) + indexLastLevelOrdinally := strings.HasSuffix(k, string(d)+implicitKey) for i, s := range ss { if canIndexFirstLevelOrdinally { - k = strconv.Itoa(i) + "." 
+ k + k = strconv.Itoa(i) + string(d) + k } else if indexLastLevelOrdinally { k = strings.TrimSuffix(k, implicitKey) + strconv.Itoa(i) } @@ -56,28 +56,28 @@ func parseValues(vs url.Values, canIndexFirstLevelOrdinally bool) node { n := node{} for k, s := range m { - n = n.split(k, s) + n = n.split(d, e, k, s) } return n } -func splitPath(path string) (k, rest string) { +func splitPath(d, e rune, path string) (k, rest string) { esc := false for i, r := range path { switch { - case !esc && r == '\\': + case !esc && r == e: esc = true - case !esc && r == '.': - return unescape(path[:i]), path[i+1:] + case !esc && r == d: + return unescape(d, e, path[:i]), path[i+1:] default: esc = false } } - return unescape(path), "" + return unescape(d, e, path), "" } -func (n node) split(path, s string) node { - k, rest := splitPath(path) +func (n node) split(d, e rune, path, s string) node { + k, rest := splitPath(d, e, path) if rest == "" { return add(n, k, s) } @@ -86,7 +86,7 @@ func (n node) split(path, s string) node { } c := getNode(n[k]) - n[k] = c.split(rest, s) + n[k] = c.split(d, e, rest, s) return n } @@ -139,10 +139,14 @@ func getString(x interface{}) string { panic("value is neither string nor node") } -func escape(s string) string { - return strings.Replace(strings.Replace(s, `\`, `\\`, -1), `.`, `\.`, -1) +func escape(d, e rune, s string) string { + s = strings.Replace(s, string(e), string(e)+string(e), -1) // Escape the escape (\ => \\) + s = strings.Replace(s, string(d), string(e)+string(d), -1) // Escape the delimiter (. => \.) + return s } -func unescape(s string) string { - return strings.Replace(strings.Replace(s, `\.`, `.`, -1), `\\`, `\`, -1) +func unescape(d, e rune, s string) string { + s = strings.Replace(s, string(e)+string(d), string(d), -1) // Unescape the delimiter (\. => .) 
+ s = strings.Replace(s, string(e)+string(e), string(e), -1) // Unescape the escape (\\ => \) + return s } diff --git a/vendor/github.com/ajg/form/pre-commit.sh b/vendor/github.com/ajg/form/pre-commit.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/sethvargo/go-fastly/.gitignore b/vendor/github.com/sethvargo/go-fastly/.gitignore deleted file mode 100644 index c9bf97327..000000000 --- a/vendor/github.com/sethvargo/go-fastly/.gitignore +++ /dev/null @@ -1,28 +0,0 @@ -### Go ### -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -bin/ -pkg/ diff --git a/vendor/github.com/sethvargo/go-fastly/.travis.yml b/vendor/github.com/sethvargo/go-fastly/.travis.yml deleted file mode 100644 index 9330e8bd6..000000000 --- a/vendor/github.com/sethvargo/go-fastly/.travis.yml +++ /dev/null @@ -1,19 +0,0 @@ -sudo: false - -language: go - -go: - - 1.4.2 - - 1.5.2 - -branches: - only: - - master - -script: - - make updatedeps - - make test - -env: - # FASTLY_API_KEY - - secure: "eiYcogJFF+lK/6coFXaOOm0bDHxaK1qqZ0GinMmPXmQ6nonf56omMVxNyOsV+6jz/fdJCA7gfGv600raTAOVNxD23E/p2j6yxPSI5O6itxp0jxJm7p6MOHwkmsXFZGfLxaqVN2CHs+W3sSc4cwzCkCqlik4XLXihHvzYpjBk1AZK6QUMWqdTcDYDWMfk5CW1O6wUpmYiFwlnENwDopGQlSs1+PyEiDEbEMYu1yVUq+f83IJ176arM4XL8NS2GN1QMBKyALA+6jpT/OrFtW5tkheE+WOQ6+/ZnDCtY0i1RA8BBuyACYuf+WEAkmWfJGGk7+Ou6q2JFzIBsd6ZS3EsM4bs4P1FyhPBwK5zyFty2w7+PwVm6wrZ0NfUh6BKsfCF9MweypsKq+F+4GOcpjdCYPKZKGRjQ4iKOZVVzaVGLRanz1EHiXUcLT+DDr0kFbvrLCLqCPvujBfqeUDqVZMpsUqir9HWqVKutczAnYzFaoeeSVap14J/sd6kcgZo2bNMSRQvMoPCOvicdW8RLIV8Hyx2l0Cv596ZfinWBk2Dcmn6APLkbrBpvhv6/SUtBKHMijjFc5VvoxO3ZP6vUCueDaZVNWkX1xk+VA5PD0T/IcilLy3+nBedz+3lmiW7dnQPuWnlPBFPWvYZvW2KaDOazv5rZK+pKIq32BIyhP/n/AU=" diff --git a/vendor/github.com/sethvargo/go-fastly/Makefile b/vendor/github.com/sethvargo/go-fastly/Makefile index 8391618cc..2addc380e 100644 --- a/vendor/github.com/sethvargo/go-fastly/Makefile +++ b/vendor/github.com/sethvargo/go-fastly/Makefile @@ -1,21 +1,42 @@ TEST?=./... +NAME?=$(shell basename "${CURDIR}") +EXTERNAL_TOOLS=\ + github.com/mitchellh/gox default: test -# test runs the test suite and vets the code +# test runs the test suite and vets the code. test: generate - go list $(TEST) | xargs -n1 go test -timeout=30s -parallel=12 $(TESTARGS) + @echo "==> Running tests..." + @go list $(TEST) \ + | grep -v "github.com/sethvargo/${NAME}/vendor" \ + | xargs -n1 go test -timeout=60s -parallel=10 ${TESTARGS} -# updatedeps installs all the dependencies the library needs to run and build +# testrace runs the race checker +testrace: generate + @echo "==> Running tests (race)..." + @go list $(TEST) \ + | grep -v "github.com/sethvargo/${NAME}/vendor" \ + | xargs -n1 go test -timeout=60s -race ${TESTARGS} + +# updatedeps installs all the dependencies needed to run and build. updatedeps: - go list ./... \ - | xargs go list -f '{{ join .Deps "\n" }}{{ printf "\n" }}{{ join .TestImports "\n" }}' \ - | grep -v github.com/sethvargo/go-fastly \ - | xargs go get -f -u -v + @sh -c "'${CURDIR}/scripts/deps.sh' '${NAME}'" -# generate runs `go generate` to build the dynamically generated source files +# generate runs `go generate` to build the dynamically generated source files. generate: - find . -type f -name '.DS_Store' -delete - go generate ./... + @echo "==> Generating..." + @find . 
-type f -name '.DS_Store' -delete + @go list ./... \ + | grep -v "github.com/hashicorp/${NAME}/vendor" \ + | xargs -n1 go generate -.PHONY: default bin dev dist test testrace updatedeps generate +# bootstrap installs the necessary go tools for development/build. +bootstrap: + @echo "==> Bootstrapping..." + @for t in ${EXTERNAL_TOOLS}; do \ + echo "--> Installing "$$t"..." ; \ + go get -u "$$t"; \ + done + +.PHONY: default test testrace updatedeps generate bootstrap diff --git a/vendor/github.com/sethvargo/go-fastly/cache_setting.go b/vendor/github.com/sethvargo/go-fastly/cache_setting.go index 3f5aebe24..79ba5c64c 100644 --- a/vendor/github.com/sethvargo/go-fastly/cache_setting.go +++ b/vendor/github.com/sethvargo/go-fastly/cache_setting.go @@ -164,7 +164,7 @@ type UpdateCacheSettingInput struct { NewName string `form:"name,omitempty"` Action CacheSettingAction `form:"action,omitempty"` TTL uint `form:"ttl,omitempty"` - StateTTL uint `form:"stale_ttl,omitempty"` + StaleTTL uint `form:"stale_ttl,omitempty"` CacheCondition string `form:"cache_condition,omitempty"` } diff --git a/vendor/github.com/sethvargo/go-fastly/client.go b/vendor/github.com/sethvargo/go-fastly/client.go index 8006beeca..4c670601f 100644 --- a/vendor/github.com/sethvargo/go-fastly/client.go +++ b/vendor/github.com/sethvargo/go-fastly/client.go @@ -1,6 +1,7 @@ package fastly import ( + "bytes" "encoding/json" "fmt" "io" @@ -144,11 +145,6 @@ func (c *Client) Request(verb, p string, ro *RequestOptions) (*http.Response, er // RequestForm makes an HTTP request with the given interface being encoded as // form data. func (c *Client) RequestForm(verb, p string, i interface{}, ro *RequestOptions) (*http.Response, error) { - values, err := form.EncodeToValues(i) - if err != nil { - return nil, err - } - if ro == nil { ro = new(RequestOptions) } @@ -158,10 +154,11 @@ func (c *Client) RequestForm(verb, p string, i interface{}, ro *RequestOptions) } ro.Headers["Content-Type"] = "application/x-www-form-urlencoded" - // There is a super-jank implementation in the form library where fields with - // a "dot" are replaced with "/.". That is then URL encoded and Fastly just - // dies. We fix that here. - body := strings.Replace(values.Encode(), "%5C.", ".", -1) + buf := new(bytes.Buffer) + if err := form.NewEncoder(buf).DelimitWith('|').Encode(i); err != nil { + return nil, err + } + body := buf.String() ro.Body = strings.NewReader(body) ro.BodyLength = int64(len(body)) diff --git a/vendor/github.com/sethvargo/go-fastly/errors.go b/vendor/github.com/sethvargo/go-fastly/errors.go index e5a617323..82f27ed21 100644 --- a/vendor/github.com/sethvargo/go-fastly/errors.go +++ b/vendor/github.com/sethvargo/go-fastly/errors.go @@ -79,12 +79,12 @@ type HTTPError struct { // NewHTTPError creates a new HTTP error from the given code. func NewHTTPError(resp *http.Response) *HTTPError { - var e *HTTPError + var e HTTPError if resp.Body != nil { decodeJSON(&e, resp.Body) } e.StatusCode = resp.StatusCode - return e + return &e } // Error implements the error interface and returns the string representing the diff --git a/vendor/github.com/sethvargo/go-fastly/fastly.go b/vendor/github.com/sethvargo/go-fastly/fastly.go index 438aa4ad0..19e7c807d 100644 --- a/vendor/github.com/sethvargo/go-fastly/fastly.go +++ b/vendor/github.com/sethvargo/go-fastly/fastly.go @@ -33,9 +33,9 @@ func (b Compatibool) MarshalText() ([]byte, error) { } // UnmarshalText implements the encoding.TextUnmarshaler interface. 
-func (b Compatibool) UnmarshalText(t []byte) error { +func (b *Compatibool) UnmarshalText(t []byte) error { if bytes.Equal(t, []byte("1")) { - b = Compatibool(true) + *b = Compatibool(true) } return nil } diff --git a/vendor/github.com/sethvargo/go-fastly/header.go b/vendor/github.com/sethvargo/go-fastly/header.go index 476d1195e..96eb7b219 100644 --- a/vendor/github.com/sethvargo/go-fastly/header.go +++ b/vendor/github.com/sethvargo/go-fastly/header.go @@ -119,7 +119,7 @@ type CreateHeaderInput struct { Name string `form:"name,omitempty"` Action HeaderAction `form:"action,omitempty"` - IgnoreIfSet bool `form:"ignore_if_set,omitempty"` + IgnoreIfSet Compatibool `form:"ignore_if_set,omitempty"` Type HeaderType `form:"type,omitempty"` Destination string `form:"dst,omitempty"` Source string `form:"src,omitempty"` @@ -204,7 +204,7 @@ type UpdateHeaderInput struct { NewName string `form:"name,omitempty"` Action HeaderAction `form:"action,omitempty"` - IgnoreIfSet bool `form:"ignore_if_set,omitempty"` + IgnoreIfSet Compatibool `form:"ignore_if_set,omitempty"` Type HeaderType `form:"type,omitempty"` Destination string `form:"dst,omitempty"` Source string `form:"src,omitempty"` diff --git a/vendor/github.com/sethvargo/go-fastly/s3.go b/vendor/github.com/sethvargo/go-fastly/s3.go index 96d848825..278fec121 100644 --- a/vendor/github.com/sethvargo/go-fastly/s3.go +++ b/vendor/github.com/sethvargo/go-fastly/s3.go @@ -6,25 +6,33 @@ import ( "time" ) +type S3Redundancy string + +const ( + S3RedundancyStandard S3Redundancy = "standard" + S3RedundancyReduced S3Redundancy = "reduced_redundancy" +) + // S3 represents a S3 response from the Fastly API. type S3 struct { ServiceID string `mapstructure:"service_id"` Version string `mapstructure:"version"` - Name string `mapstructure:"name"` - BucketName string `mapstructure:"bucket_name"` - Domain string `mapstructure:"domain"` - AccessKey string `mapstructure:"access_key"` - SecretKey string `mapstructure:"secret_key"` - Path string `mapstructure:"path"` - Period uint `mapstructure:"period"` - GzipLevel uint `mapstructure:"gzip_level"` - Format string `mapstructure:"format"` - ResponseCondition string `mapstructure:"response_condition"` - TimestampFormat string `mapstructure:"timestamp_format"` - CreatedAt *time.Time `mapstructure:"created_at"` - UpdatedAt *time.Time `mapstructure:"updated_at"` - DeletedAt *time.Time `mapstructure:"deleted_at"` + Name string `mapstructure:"name"` + BucketName string `mapstructure:"bucket_name"` + Domain string `mapstructure:"domain"` + AccessKey string `mapstructure:"access_key"` + SecretKey string `mapstructure:"secret_key"` + Path string `mapstructure:"path"` + Period uint `mapstructure:"period"` + GzipLevel uint `mapstructure:"gzip_level"` + Format string `mapstructure:"format"` + ResponseCondition string `mapstructure:"response_condition"` + TimestampFormat string `mapstructure:"timestamp_format"` + Redundancy S3Redundancy `mapstructure:"redundancy"` + CreatedAt *time.Time `mapstructure:"created_at"` + UpdatedAt *time.Time `mapstructure:"updated_at"` + DeletedAt *time.Time `mapstructure:"deleted_at"` } // s3sByName is a sortable list of S3s. 
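The `RequestForm` change earlier in this patch drops the old string surgery on escaped dots and instead encodes with `form.NewEncoder(buf).DelimitWith('|')`, so form keys that legitimately contain a `.` are no longer escaped. A short sketch of the difference, assuming the vendored `github.com/ajg/form` package; the `settings` type and its dotted key are illustrative stand-ins for the keys the Fastly API expects:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/ajg/form"
)

// settings is an illustrative type whose form key contains a literal dot.
type settings struct {
	DefaultHost string `form:"general.default_host,omitempty"`
}

func main() {
	s := settings{DefaultHost: "www.example.com"}

	// Default delimiter is '.', so the dot in the key is escaped with '\'
	// and then percent-encoded: general%5C.default_host=www.example.com
	var plain bytes.Buffer
	if err := form.NewEncoder(&plain).Encode(s); err != nil {
		panic(err)
	}
	fmt.Println(plain.String())

	// With '|' as the delimiter the dot is just an ordinary character:
	// general.default_host=www.example.com
	var piped bytes.Buffer
	if err := form.NewEncoder(&piped).DelimitWith('|').Encode(s); err != nil {
		panic(err)
	}
	fmt.Println(piped.String())
}
```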
@@ -77,17 +85,18 @@ type CreateS3Input struct { Service string Version string - Name string `form:"name,omitempty"` - BucketName string `form:"bucket_name,omitempty"` - Domain string `form:"domain,omitempty"` - AccessKey string `form:"access_key,omitempty"` - SecretKey string `form:"secret_key,omitempty"` - Path string `form:"path,omitempty"` - Period uint `form:"period,omitempty"` - GzipLevel uint `form:"gzip_level,omitempty"` - Format string `form:"format,omitempty"` - ResponseCondition string `form:"response_condition,omitempty"` - TimestampFormat string `form:"timestamp_format,omitempty"` + Name string `form:"name,omitempty"` + BucketName string `form:"bucket_name,omitempty"` + Domain string `form:"domain,omitempty"` + AccessKey string `form:"access_key,omitempty"` + SecretKey string `form:"secret_key,omitempty"` + Path string `form:"path,omitempty"` + Period uint `form:"period,omitempty"` + GzipLevel uint `form:"gzip_level,omitempty"` + Format string `form:"format,omitempty"` + ResponseCondition string `form:"response_condition,omitempty"` + TimestampFormat string `form:"timestamp_format,omitempty"` + Redundancy S3Redundancy `form:"redundancy,omitempty"` } // CreateS3 creates a new Fastly S3. @@ -161,17 +170,18 @@ type UpdateS3Input struct { // Name is the name of the S3 to update. Name string - NewName string `form:"name,omitempty"` - BucketName string `form:"bucket_name,omitempty"` - Domain string `form:"domain,omitempty"` - AccessKey string `form:"access_key,omitempty"` - SecretKey string `form:"secret_key,omitempty"` - Path string `form:"path,omitempty"` - Period uint `form:"period,omitempty"` - GzipLevel uint `form:"gzip_level,omitempty"` - Format string `form:"format,omitempty"` - ResponseCondition string `form:"response_condition,omitempty"` - TimestampFormat string `form:"timestamp_format,omitempty"` + NewName string `form:"name,omitempty"` + BucketName string `form:"bucket_name,omitempty"` + Domain string `form:"domain,omitempty"` + AccessKey string `form:"access_key,omitempty"` + SecretKey string `form:"secret_key,omitempty"` + Path string `form:"path,omitempty"` + Period uint `form:"period,omitempty"` + GzipLevel uint `form:"gzip_level,omitempty"` + Format string `form:"format,omitempty"` + ResponseCondition string `form:"response_condition,omitempty"` + TimestampFormat string `form:"timestamp_format,omitempty"` + Redundancy S3Redundancy `form:"redundancy,omitempty"` } // UpdateS3 updates a specific S3. diff --git a/vendor/github.com/sethvargo/go-fastly/version.go b/vendor/github.com/sethvargo/go-fastly/version.go index fd22f9067..8b54c9cee 100644 --- a/vendor/github.com/sethvargo/go-fastly/version.go +++ b/vendor/github.com/sethvargo/go-fastly/version.go @@ -7,14 +7,14 @@ import ( // Version represents a distinct configuration version. type Version struct { - Number string `mapstructure:"number"` - Comment string `mapstructure:"comment"` - ServiceID string `mapstructure:"service_id"` - Active bool `mapstructure:"active"` - Locked bool `mapstructure:"locked"` - Deployed bool `mapstructure:"deployed"` - Staging bool `mapstructure:"staging"` - Testing bool `mapstructure:"testing"` + Number string `mapstructure:"number"` + Comment string `mapstructure:"comment"` + ServiceID string `mapstructure:"service_id"` + Active bool `mapstructure:"active"` + Locked bool `mapstructure:"locked"` + Deployed bool `mapstructure:"deployed"` + Staging bool `mapstructure:"staging"` + Testing bool `mapstructure:"testing"` } // versionsByNumber is a sortable list of versions. 
This is used by the version diff --git a/vendor/vendor.json index 931fedb09..b062792b6 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -250,8 +250,10 @@ "revision": "9b82b0372a4edf52f66fbc8feaa6aafe0123001d" }, { + "checksumSHA1": "csR8njyJfkweB0RCtfnLwgXNeqQ=", "path": "github.com/ajg/form", - "revision": "c9e1c3ae1f869d211cdaa085d23c6af2f5f83866" + "revision": "7ff89c75808766205bfa4411abb436c98c33eb5e", + "revisionTime": "2016-06-29T21:43:12Z" }, { "path": "github.com/apparentlymart/go-cidr/cidr", @@ -1611,8 +1613,10 @@ "revision": "d41af8bb6a7704f00bc3b7cba9355ae6a5a80048" }, { + "checksumSHA1": "DWJoWDXcRi4HUCyxU6dLVVjR4pI=", "path": "github.com/sethvargo/go-fastly", - "revision": "6566b161e807516f4a45bc3054eac291a120e217" + "revision": "b0a18d43769d55365d4fbd9ba36493e5c0dcd8f5", + "revisionTime": "2016-07-08T18:18:56Z" }, { "comment": "v1.1-2-g5578a8c",
From eb314ee520806c60c8363c723bde74fa2a79750f Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 21 Jul 2016 20:16:13 +0100 Subject: [PATCH 0370/1238] provider/aws: Set `storage_encrypted` to state in `aws_rds_cluster_instance` (#7751) The Import test showed that there was no setting of the `storage_encrypted` value back to state on the Read func. ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRDSClusterInstance_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRDSClusterInstance_importBasic -timeout 120m === RUN TestAccAWSRDSClusterInstance_importBasic --- PASS: TestAccAWSRDSClusterInstance_importBasic (754.30s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 754.411s ``` --- builtin/providers/aws/resource_aws_rds_cluster_instance.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 795665685..745674c43 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -205,6 +205,7 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("cluster_identifier", db.DBClusterIdentifier) d.Set("instance_class", db.DBInstanceClass) d.Set("identifier", db.DBInstanceIdentifier) + d.Set("storage_encrypted", db.StorageEncrypted) if len(db.DBParameterGroups) > 0 { d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName)
From f66d1a10a4a6ce7dafa8196c82fb236540f2a6b5 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 21 Jul 2016 14:07:16 -0400 Subject: [PATCH 0371/1238] Add VersionString We conditionally format version with VersionPrerelease in a number of places. Add a package-level function where we can unify the version format. Replace most of version formatting in terraform, but leave the few instances set from the top-level package to make sure we don't break anything before release.
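A self-contained sketch of the pattern this commit describes — one package-level helper instead of repeating the prerelease check at every call site. The constants are hard-coded stand-ins for the real values in the `terraform` package:

```go
package main

import "fmt"

const (
	Version           = "0.7.0"
	VersionPrerelease = "rc3" // empty string for final releases
)

// VersionString formats the version once, the same way for every caller.
func VersionString() string {
	if VersionPrerelease != "" {
		return fmt.Sprintf("%s-%s", Version, VersionPrerelease)
	}
	return Version
}

func main() {
	// Call sites such as User-Agent headers no longer re-implement the check.
	fmt.Println("HashiCorp-Terraform-v" + VersionString()) // HashiCorp-Terraform-v0.7.0-rc3
}
```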
--- builtin/providers/atlas/provider.go | 2 +- builtin/providers/aws/config.go | 2 +- builtin/providers/azurerm/config.go | 8 +------- builtin/providers/google/config.go | 6 +----- terraform/version.go | 9 +++++++++ 5 files changed, 13 insertions(+), 14 deletions(-) diff --git a/builtin/providers/atlas/provider.go b/builtin/providers/atlas/provider.go index e7034a7cd..14928de63 100644 --- a/builtin/providers/atlas/provider.go +++ b/builtin/providers/atlas/provider.go @@ -52,7 +52,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { return nil, err } } - client.DefaultHeader.Set(terraform.VersionHeader, terraform.Version) + client.DefaultHeader.Set(terraform.VersionHeader, terraform.VersionString()) client.Token = d.Get("token").(string) return client, nil diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index ab50b8ae2..85f1f30ea 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -397,7 +397,7 @@ func (c *Config) ValidateAccountId(accountId string) error { var addTerraformVersionToUserAgent = request.NamedHandler{ Name: "terraform.TerraformVersionUserAgentHandler", Fn: request.MakeAddToUserAgentHandler( - "terraform", terraform.Version, terraform.VersionPrerelease), + "terraform", terraform.VersionString()), } type awsLogger struct{} diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index 50e9c89f4..e44a9222a 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -92,13 +92,7 @@ func withRequestLogging() autorest.SendDecorator { } func setUserAgent(client *autorest.Client) { - var version string - if terraform.VersionPrerelease != "" { - version = fmt.Sprintf("%s-%s", terraform.Version, terraform.VersionPrerelease) - } else { - version = terraform.Version - } - + version := terraform.VersionString() client.UserAgent = fmt.Sprintf("HashiCorp-Terraform-v%s", version) } diff --git a/builtin/providers/google/config.go b/builtin/providers/google/config.go index 159a57e09..c824c9ee6 100644 --- a/builtin/providers/google/config.go +++ b/builtin/providers/google/config.go @@ -85,11 +85,7 @@ func (c *Config) loadAndValidate() error { } } - versionString := terraform.Version - prerelease := terraform.VersionPrerelease - if len(prerelease) > 0 { - versionString = fmt.Sprintf("%s-%s", versionString, prerelease) - } + versionString := terraform.VersionString() userAgent := fmt.Sprintf( "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) diff --git a/terraform/version.go b/terraform/version.go index 7462a3c67..d753ff8e7 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -1,6 +1,8 @@ package terraform import ( + "fmt" + "github.com/hashicorp/go-version" ) @@ -20,3 +22,10 @@ var SemVersion = version.Must(version.NewVersion(Version)) // VersionHeader is the header name used to send the current terraform version // in http requests. 
const VersionHeader = "Terraform-Version" + +func VersionString() string { + if VersionPrerelease != "" { + return fmt.Sprintf("%s-%s", Version, VersionPrerelease) + } + return Version +} From 78155d23cf4ba40bd82d04b860bd8016dc3f30a3 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 21 Jul 2016 16:45:18 -0500 Subject: [PATCH 0372/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50d25d909..5660e9c59 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -118,6 +118,7 @@ IMPROVEMENTS: * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition [GH-7653] * provider/aws: Support Tags on `aws_rds_cluster` [GH-7695] * provider/aws: Support kms_key_id for `aws_rds_cluster` [GH-7662] + * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 732b8d3b6e0ba139ad97dc09db0a1418f58ffcf4 Mon Sep 17 00:00:00 2001 From: Brad Sickles Date: Thu, 21 Jul 2016 18:08:32 -0400 Subject: [PATCH 0373/1238] Implementing aws_ami_launch_permission. (#7365) --- builtin/providers/aws/provider.go | 1 + .../aws/resource_aws_ami_launch_permission.go | 104 +++++++++++++++++ ...resource_aws_ami_launch_permission_test.go | 105 ++++++++++++++++++ .../docs/providers/aws/r/ami.html.markdown | 3 + .../aws/r/ami_launch_permission.html.markdown | 33 ++++++ 5 files changed, 246 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_ami_launch_permission.go create mode 100644 builtin/providers/aws/resource_aws_ami_launch_permission_test.go create mode 100644 website/source/docs/providers/aws/r/ami_launch_permission.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index cfe8033e0..edb2a744f 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -121,6 +121,7 @@ func Provider() terraform.ResourceProvider { "aws_ami": resourceAwsAmi(), "aws_ami_copy": resourceAwsAmiCopy(), "aws_ami_from_instance": resourceAwsAmiFromInstance(), + "aws_ami_launch_permission": resourceAwsAmiLaunchPermission(), "aws_api_gateway_account": resourceAwsApiGatewayAccount(), "aws_api_gateway_api_key": resourceAwsApiGatewayApiKey(), "aws_api_gateway_authorizer": resourceAwsApiGatewayAuthorizer(), diff --git a/builtin/providers/aws/resource_aws_ami_launch_permission.go b/builtin/providers/aws/resource_aws_ami_launch_permission.go new file mode 100644 index 000000000..d1c738d39 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ami_launch_permission.go @@ -0,0 +1,104 @@ +package aws + +import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAmiLaunchPermission() *schema.Resource { + return &schema.Resource{ + Exists: resourceAwsAmiLaunchPermissionExists, + Create: resourceAwsAmiLaunchPermissionCreate, + Read: resourceAwsAmiLaunchPermissionRead, + Delete: resourceAwsAmiLaunchPermissionDelete, + + Schema: map[string]*schema.Schema{ + "image_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "account_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func 
resourceAwsAmiLaunchPermissionExists(d *schema.ResourceData, meta interface{}) (bool, error) { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + return hasLaunchPermission(conn, image_id, account_id) +} + +func resourceAwsAmiLaunchPermissionCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: aws.String("launchPermission"), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Add: []*ec2.LaunchPermission{ + &ec2.LaunchPermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("error creating ami launch permission: %s", err) + } + + d.SetId(fmt.Sprintf("%s-%s", image_id, account_id)) + return nil +} + +func resourceAwsAmiLaunchPermissionRead(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceAwsAmiLaunchPermissionDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + image_id := d.Get("image_id").(string) + account_id := d.Get("account_id").(string) + + _, err := conn.ModifyImageAttribute(&ec2.ModifyImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: aws.String("launchPermission"), + LaunchPermission: &ec2.LaunchPermissionModifications{ + Remove: []*ec2.LaunchPermission{ + &ec2.LaunchPermission{UserId: aws.String(account_id)}, + }, + }, + }) + if err != nil { + return fmt.Errorf("error removing ami launch permission: %s", err) + } + + return nil +} + +func hasLaunchPermission(conn *ec2.EC2, image_id string, account_id string) (bool, error) { + attrs, err := conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{ + ImageId: aws.String(image_id), + Attribute: aws.String("launchPermission"), + }) + if err != nil { + return false, err + } + + for _, lp := range attrs.LaunchPermissions { + if *lp.UserId == account_id { + return true, nil + } + } + return false, nil +} diff --git a/builtin/providers/aws/resource_aws_ami_launch_permission_test.go b/builtin/providers/aws/resource_aws_ami_launch_permission_test.go new file mode 100644 index 000000000..0affa9161 --- /dev/null +++ b/builtin/providers/aws/resource_aws_ami_launch_permission_test.go @@ -0,0 +1,105 @@ +package aws + +import ( + "fmt" + r "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "os" + "testing" +) + +func TestAccAWSAMILaunchPermission_Basic(t *testing.T) { + image_id := "" + account_id := os.Getenv("AWS_ACCOUNT_ID") + + r.Test(t, r.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + if os.Getenv("AWS_ACCOUNT_ID") == "" { + t.Fatal("AWS_ACCOUNT_ID must be set") + } + }, + Providers: testAccProviders, + Steps: []r.TestStep{ + // Scaffold everything + r.TestStep{ + Config: testAccAWSAMILaunchPermissionConfig(account_id, true), + Check: r.ComposeTestCheckFunc( + testCheckResourceGetAttr("aws_ami_copy.test", "id", &image_id), + testAccAWSAMILaunchPermissionExists(account_id, &image_id), + ), + }, + // Drop just launch permission to test destruction + r.TestStep{ + Config: testAccAWSAMILaunchPermissionConfig(account_id, false), + Check: r.ComposeTestCheckFunc( + testAccAWSAMILaunchPermissionDestroyed(account_id, &image_id), + ), + }, + }, + }) +} + +func testCheckResourceGetAttr(name, key string, value *string) r.TestCheckFunc { + return 
func(s *terraform.State) error { + ms := s.RootModule() + rs, ok := ms.Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + is := rs.Primary + if is == nil { + return fmt.Errorf("No primary instance: %s", name) + } + + *value = is.Attributes[key] + return nil + } +} + +func testAccAWSAMILaunchPermissionExists(account_id string, image_id *string) r.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + if has, err := hasLaunchPermission(conn, *image_id, account_id); err != nil { + return err + } else if !has { + return fmt.Errorf("launch permission does not exist for '%s' on '%s'", account_id, *image_id) + } + return nil + } +} + +func testAccAWSAMILaunchPermissionDestroyed(account_id string, image_id *string) r.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + if has, err := hasLaunchPermission(conn, *image_id, account_id); err != nil { + return err + } else if has { + return fmt.Errorf("launch permission still exists for '%s' on '%s'", account_id, *image_id) + } + return nil + } +} + +func testAccAWSAMILaunchPermissionConfig(account_id string, includeLaunchPermission bool) string { + base := ` +resource "aws_ami_copy" "test" { + name = "launch-permission-test" + description = "Launch Permission Test Copy" + source_ami_id = "ami-7172b611" + source_ami_region = "us-west-2" +} +` + + if !includeLaunchPermission { + return base + } + + return base + fmt.Sprintf(` +resource "aws_ami_launch_permission" "self-test" { + image_id = "${aws_ami_copy.test.id}" + account_id = "%s" +} +`, account_id) +} diff --git a/website/source/docs/providers/aws/r/ami.html.markdown b/website/source/docs/providers/aws/r/ami.html.markdown index 25ac04db6..1a579407e 100644 --- a/website/source/docs/providers/aws/r/ami.html.markdown +++ b/website/source/docs/providers/aws/r/ami.html.markdown @@ -14,6 +14,9 @@ The AMI resource allows the creation and management of a completely-custom If you just want to duplicate an existing AMI, possibly copying it to another region, it's better to use `aws_ami_copy` instead. +If you just want to share an existing AMI with another AWS account, +it's better to use `aws_ami_launch_permission` instead. + ## Example Usage ``` diff --git a/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown b/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown new file mode 100644 index 000000000..7be0ffd59 --- /dev/null +++ b/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown @@ -0,0 +1,33 @@ +--- +layout: "aws" +page_title: "AWS: aws_ami_launch_permission" +sidebar_current: "docs-aws-resource-ami-launch-permission" +description: |- + Adds launch permission to Amazon Machine Image (AMI). +--- + +# aws\_ami\_launch\_permission + +Adds launch permission to Amazon Machine Image (AMI) from another AWS account. + +## Example Usage + +``` +resource "aws_ami_launch_permission" "example" { + image_id = "ami-12345678" + account_id = "123456789012" +} +``` + +## Argument Reference + +The following arguments are supported: + + * `image_id` - (required) A region-unique name for the AMI. + * `account_id` - (required) An AWS Account ID to add launch permissions. + +## Attributes Reference + +The following attributes are exported: + + * `id` - A combination of "`image_id`-`account_id`". 
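The new resource's create, read, and delete paths all hinge on a single `launchPermission` image attribute. A standalone sketch of that check with the AWS SDK for Go, mirroring the `hasLaunchPermission` helper above; the AMI ID and account ID are placeholders, and credentials are assumed to come from the environment:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

// hasLaunchPermission reports whether accountID is listed in the AMI's
// launchPermission attribute.
func hasLaunchPermission(conn *ec2.EC2, imageID, accountID string) (bool, error) {
	attrs, err := conn.DescribeImageAttribute(&ec2.DescribeImageAttributeInput{
		ImageId:   aws.String(imageID),
		Attribute: aws.String("launchPermission"),
	})
	if err != nil {
		return false, err
	}
	for _, lp := range attrs.LaunchPermissions {
		if aws.StringValue(lp.UserId) == accountID {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	sess := session.Must(session.NewSession())
	ok, err := hasLaunchPermission(ec2.New(sess), "ami-12345678", "123456789012")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("launch permission present:", ok)
}
```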
From 3c9ea8da7560655bf52ba1140ad931b885ed6895 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 21 Jul 2016 23:09:12 +0100 Subject: [PATCH 0374/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5660e9c59..a076e6c1f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -62,6 +62,7 @@ FEATURES: * **New Resource:** `aws_simpledb_domain` [GH-7600] * **New Resource:** `aws_opsworks_user_profile` [GH-6304] * **New Resource:** `aws_opsworks_permission` [GH-6304] + * **New Resource:** `aws_ami_launch_permission` [GH-7365] * **New Resource:** `openstack_blockstorage_volume_v2` [GH-6693] * **New Resource:** `openstack_lb_loadbalancer_v2` [GH-7012] * **New Resource:** `openstack_lb_listener_v2` [GH-7012] From 2505b2e35f371a1b4d96cefbe77bc73d779c6b5e Mon Sep 17 00:00:00 2001 From: Chris Broglie Date: Thu, 21 Jul 2016 15:37:58 -0700 Subject: [PATCH 0375/1238] Add support for Kinesis streams shard-level metrics (#7684) * Add support for Kinesis streams shard-level metrics * Add test case for flattenKinesisShardLevelMetrics * Document new shard_level_metrics field --- .../aws/resource_aws_kinesis_stream.go | 110 +++++++++++++++--- .../aws/resource_aws_kinesis_stream_test.go | 78 +++++++++++++ builtin/providers/aws/structure.go | 12 ++ builtin/providers/aws/structure_test.go | 22 ++++ .../aws/r/kinesis_stream.html.markdown | 6 + 5 files changed, 212 insertions(+), 16 deletions(-) diff --git a/builtin/providers/aws/resource_aws_kinesis_stream.go b/builtin/providers/aws/resource_aws_kinesis_stream.go index 1d70d8476..ed731f165 100644 --- a/builtin/providers/aws/resource_aws_kinesis_stream.go +++ b/builtin/providers/aws/resource_aws_kinesis_stream.go @@ -46,6 +46,13 @@ func resourceAwsKinesisStream() *schema.Resource { }, }, + "shard_level_metrics": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "arn": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -110,6 +117,9 @@ func resourceAwsKinesisStreamUpdate(d *schema.ResourceData, meta interface{}) er if err := setKinesisRetentionPeriod(conn, d); err != nil { return err } + if err := updateKinesisShardLevelMetrics(conn, d); err != nil { + return err + } return resourceAwsKinesisStreamRead(d, meta) } @@ -134,6 +144,10 @@ func resourceAwsKinesisStreamRead(d *schema.ResourceData, meta interface{}) erro d.Set("shard_count", state.shardCount) d.Set("retention_period", state.retentionPeriod) + if len(state.shardLevelMetrics) > 0 { + d.Set("shard_level_metrics", state.shardLevelMetrics) + } + // set tags describeTagsOpts := &kinesis.ListTagsForStreamInput{ StreamName: aws.String(sn), @@ -212,30 +226,74 @@ func setKinesisRetentionPeriod(conn *kinesis.Kinesis, d *schema.ResourceData) er } } - stateConf := &resource.StateChangeConf{ - Pending: []string{"UPDATING"}, - Target: []string{"ACTIVE"}, - Refresh: streamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err } - _, err := stateConf.WaitForState() - if err != nil { - return fmt.Errorf( - "Error waiting for Kinesis Stream (%s) to become active: %s", - sn, err) + return nil +} + +func updateKinesisShardLevelMetrics(conn *kinesis.Kinesis, d *schema.ResourceData) error { + sn := d.Get("name").(string) + + o, n := d.GetChange("shard_level_metrics") + if o == nil { + o = new(schema.Set) + } + if n == nil { + n = 
new(schema.Set) + } + + os := o.(*schema.Set) + ns := n.(*schema.Set) + + disableMetrics := os.Difference(ns) + if disableMetrics.Len() != 0 { + metrics := disableMetrics.List() + log.Printf("[DEBUG] Disabling shard level metrics %v for stream %s", metrics, sn) + + props := &kinesis.DisableEnhancedMonitoringInput{ + StreamName: aws.String(sn), + ShardLevelMetrics: expandStringList(metrics), + } + + _, err := conn.DisableEnhancedMonitoring(props) + if err != nil { + return fmt.Errorf("Failure to disable shard level metrics for stream %s: %s", sn, err) + } + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err + } + } + + enabledMetrics := ns.Difference(os) + if enabledMetrics.Len() != 0 { + metrics := enabledMetrics.List() + log.Printf("[DEBUG] Enabling shard level metrics %v for stream %s", metrics, sn) + + props := &kinesis.EnableEnhancedMonitoringInput{ + StreamName: aws.String(sn), + ShardLevelMetrics: expandStringList(metrics), + } + + _, err := conn.EnableEnhancedMonitoring(props) + if err != nil { + return fmt.Errorf("Failure to enable shard level metrics for stream %s: %s", sn, err) + } + if err := waitForKinesisToBeActive(conn, sn); err != nil { + return err + } } return nil } type kinesisStreamState struct { - arn string - status string - shardCount int - retentionPeriod int64 + arn string + status string + shardCount int + retentionPeriod int64 + shardLevelMetrics []string } func readKinesisStreamState(conn *kinesis.Kinesis, sn string) (kinesisStreamState, error) { @@ -249,6 +307,7 @@ func readKinesisStreamState(conn *kinesis.Kinesis, sn string) (kinesisStreamStat state.status = aws.StringValue(page.StreamDescription.StreamStatus) state.shardCount += len(openShards(page.StreamDescription.Shards)) state.retentionPeriod = aws.Int64Value(page.StreamDescription.RetentionPeriodHours) + state.shardLevelMetrics = flattenKinesisShardLevelMetrics(page.StreamDescription.EnhancedMonitoring) return !last }) return state, err @@ -271,6 +330,25 @@ func streamStateRefreshFunc(conn *kinesis.Kinesis, sn string) resource.StateRefr } } +func waitForKinesisToBeActive(conn *kinesis.Kinesis, sn string) error { + stateConf := &resource.StateChangeConf{ + Pending: []string{"UPDATING"}, + Target: []string{"ACTIVE"}, + Refresh: streamStateRefreshFunc(conn, sn), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err := stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for Kinesis Stream (%s) to become active: %s", + sn, err) + } + return nil +} + // See http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-merge.html func openShards(shards []*kinesis.Shard) []*kinesis.Shard { var open []*kinesis.Shard diff --git a/builtin/providers/aws/resource_aws_kinesis_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_stream_test.go index 626f949f6..974761182 100644 --- a/builtin/providers/aws/resource_aws_kinesis_stream_test.go +++ b/builtin/providers/aws/resource_aws_kinesis_stream_test.go @@ -116,6 +116,52 @@ func TestAccAWSKinesisStream_retentionPeriod(t *testing.T) { }) } +func TestAccAWSKinesisStream_shardLevelMetrics(t *testing.T) { + var stream kinesis.StreamDescription + + ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + config := fmt.Sprintf(testAccKinesisStreamConfig, ri) + allConfig := fmt.Sprintf(testAccKinesisStreamConfigAllShardLevelMetrics, ri) + singleConfig := fmt.Sprintf(testAccKinesisStreamConfigSingleShardLevelMetric, ri) + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckKinesisStreamDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), + testAccCheckAWSKinesisStreamAttributes(&stream), + resource.TestCheckResourceAttr( + "aws_kinesis_stream.test_stream", "shard_level_metrics.#", ""), + ), + }, + + resource.TestStep{ + Config: allConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), + testAccCheckAWSKinesisStreamAttributes(&stream), + resource.TestCheckResourceAttr( + "aws_kinesis_stream.test_stream", "shard_level_metrics.#", "7"), + ), + }, + + resource.TestStep{ + Config: singleConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisStreamExists("aws_kinesis_stream.test_stream", &stream), + testAccCheckAWSKinesisStreamAttributes(&stream), + resource.TestCheckResourceAttr( + "aws_kinesis_stream.test_stream", "shard_level_metrics.#", "1"), + ), + }, + }, + }) +} + func testAccCheckKinesisStreamExists(n string, stream *kinesis.StreamDescription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -227,3 +273,35 @@ resource "aws_kinesis_stream" "test_stream" { } } ` + +var testAccKinesisStreamConfigAllShardLevelMetrics = ` +resource "aws_kinesis_stream" "test_stream" { + name = "terraform-kinesis-test-%d" + shard_count = 2 + tags { + Name = "tf-test" + } + shard_level_metrics = [ + "IncomingBytes", + "IncomingRecords", + "OutgoingBytes", + "OutgoingRecords", + "WriteProvisionedThroughputExceeded", + "ReadProvisionedThroughputExceeded", + "IteratorAgeMilliseconds" + ] +} +` + +var testAccKinesisStreamConfigSingleShardLevelMetric = ` +resource "aws_kinesis_stream" "test_stream" { + name = "terraform-kinesis-test-%d" + shard_count = 2 + tags { + Name = "tf-test" + } + shard_level_metrics = [ + "IncomingBytes" + ] +} +` diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index 41464a5d3..cdedb27ca 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -21,6 +21,7 @@ import ( "github.com/aws/aws-sdk-go/service/elasticbeanstalk" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" "github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/lambda" "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/redshift" @@ -1005,6 +1006,17 @@ func flattenAsgEnabledMetrics(list []*autoscaling.EnabledMetric) []string { return strs } +func flattenKinesisShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { + if len(list) == 0 { + return []string{} + } + strs := make([]string, 0, len(list[0].ShardLevelMetrics)) + for _, s := range list[0].ShardLevelMetrics { + strs = append(strs, *s) + } + return strs +} + func flattenApiGatewayStageKeys(keys []*string) []map[string]interface{} { stageKeys := make([]map[string]interface{}, 0, len(keys)) for _, o := range keys { diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index 0ac0a73dc..937411af1 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -11,6 +11,7 @@ import ( "github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/elasticache" 
"github.com/aws/aws-sdk-go/service/elb" + "github.com/aws/aws-sdk-go/service/kinesis" "github.com/aws/aws-sdk-go/service/rds" "github.com/aws/aws-sdk-go/service/redshift" "github.com/aws/aws-sdk-go/service/route53" @@ -839,6 +840,27 @@ func TestFlattenAsgEnabledMetrics(t *testing.T) { } } +func TestFlattenKinesisShardLevelMetrics(t *testing.T) { + expanded := []*kinesis.EnhancedMetrics{ + &kinesis.EnhancedMetrics{ + ShardLevelMetrics: []*string{ + aws.String("IncomingBytes"), + aws.String("IncomingRecords"), + }, + }, + } + result := flattenKinesisShardLevelMetrics(expanded) + if len(result) != 2 { + t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) + } + if result[0] != "IncomingBytes" { + t.Fatalf("expected element 0 to be IncomingBytes, but was %s", result[0]) + } + if result[1] != "IncomingRecords" { + t.Fatalf("expected element 0 to be IncomingRecords, but was %s", result[1]) + } +} + func TestFlattenSecurityGroups(t *testing.T) { cases := []struct { ownerId *string diff --git a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown b/website/source/docs/providers/aws/r/kinesis_stream.html.markdown index 90220bffb..1ae13bfcf 100644 --- a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown +++ b/website/source/docs/providers/aws/r/kinesis_stream.html.markdown @@ -20,6 +20,10 @@ resource "aws_kinesis_stream" "test_stream" { name = "terraform-kinesis-test" shard_count = 1 retention_period = 48 + shard_level_metrics = [ + "IncomingBytes", + "OutgoingBytes" + ] tags { Environment = "test" } @@ -36,6 +40,7 @@ AWS account and region the Stream is created in. Amazon has guidlines for specifying the Stream size that should be referenced when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more. * `retention_period` - (Optional) Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours. Minimum value is 24. Default is 24. +* `shard_level_metrics` - (Optional) A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch][3] for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable. * `tags` - (Optional) A mapping of tags to assign to the resource. ## Attributes Reference @@ -48,3 +53,4 @@ when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more. 
[1]: https://aws.amazon.com/documentation/kinesis/ [2]: https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html +[3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html From dce175459a9b0bc1f42b1b0f919aae294a73a138 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 21 Jul 2016 23:38:49 +0100 Subject: [PATCH 0376/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a076e6c1f..ac2640d2b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -120,6 +120,7 @@ IMPROVEMENTS: * provider/aws: Support Tags on `aws_rds_cluster` [GH-7695] * provider/aws: Support kms_key_id for `aws_rds_cluster` [GH-7662] * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] + * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From aca7f5e805a511b229eff7635debdcdf44d3e196 Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Fri, 22 Jul 2016 10:10:26 +0200 Subject: [PATCH 0377/1238] Update CHANGELOG.md --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index ac2640d2b..aabf76ea3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,8 +22,9 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * `azurerm_virtual_machine` computer_name now Required * `aws_db_instance` now defaults `publicly_accessible` to false * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. - * `atlas_artifact` resource has be depracated. Please use the new `atlas_artifact` Data Source + * `atlas_artifact` resource has be deprecated. Please use the new `atlas_artifact` Data Source * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please ue the new `openstack_lb_member_v1` resource. 
+ * All deprecated parameters are removed from all `CloudStack` resources FEATURES: @@ -131,6 +132,11 @@ IMPROVEMENTS: * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` [GH-6898] * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier [GH-6741] * provider/cloudstack: Improve ACL swapping [GH-7315] + * provider/cloudstack: Add project support to `cloudstack_network_acl` and `cloudstack_network_acl_rule` [GH-7612] + * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` [GH-7070] + * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` [GH-7074] + * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` [GH-7240] + * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` [GH-7242] * provider/datadog: Add support for 'require full window' and 'locked' [GH-6738] * provider/docker: Docker Container DNS Setting Enhancements [GH-7392] * provider/docker: Add `destroy_grace_seconds` option to stop container before delete [GH-7513] @@ -228,6 +234,9 @@ BUG FIXES: * provider/azurerm: `azurerm_virtual_machine` computer_name now Required [GH-7308] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] + * provider/cloudstack: Fix refresing `cloudstack_network_acl_rule` when the associated ACL is deleted [GH-7612] + * provider/cloudstack: Fix refresing `cloudstack_port_forward` when the associated IP address is no longer associated [GH-7612] + * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges [GH-7612] * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] * provider/digitalocean: Reassign Floating IP when droplet changes [GH-7411] * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` [GH-6689] From fa7ef4ceeda8413c5d36b6b145d3d10c770066cc Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 22 Jul 2016 05:39:59 -0500 Subject: [PATCH 0378/1238] Update docs to centralize on ARM-based Azure provider (#7767) Sidebar: - Rename "Azure (Resource Manager)" to "Microsoft Azure" and sort accordingly - Rename "Azure (Service Management)" to "Microsoft Azure (Legacy ASM)" and sort accordingly ARM provider docs: - Name changes everywhere to Microsoft Azure Provider - Mention and link to "legacy Azure Service Management Provider" in opening paragraph - Sidebar gains link at bottom to Azure Service Management Provider ASM provider docs: - Name changes everywhere to Azure Service Management Provider - Sidebar gains link at bottom to Microsoft Azure Provider - Every page gets a header with the following - "NOTE: The Azure Service Management provider is no longer being actively developed by HashiCorp employees. It continues to be supported by the community. We recommend using the Azure Resource Manager based [Microsoft Azure Provider] instead if possible." 
--- .../docs/providers/azure/index.html.markdown | 8 +++++--- .../docs/providers/azurerm/index.html.markdown | 15 +++++++++------ website/source/layouts/azure.erb | 14 +++++++++++++- website/source/layouts/azurerm.erb | 5 ++++- website/source/layouts/docs.erb | 16 ++++++++-------- 5 files changed, 39 insertions(+), 19 deletions(-) diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown index d7e6dcda4..6e054b442 100644 --- a/website/source/docs/providers/azure/index.html.markdown +++ b/website/source/docs/providers/azure/index.html.markdown @@ -1,14 +1,16 @@ --- layout: "azure" -page_title: "Provider: Azure" +page_title: "Provider: Azure Service Management" sidebar_current: "docs-azure-index" description: |- The Azure provider is used to interact with the many resources supported by Azure. The provider needs to be configured with a publish settings file and optionally a subscription ID before it can be used. --- -# Azure Provider +# Azure Service Management Provider -The Azure provider is used to interact with the many resources supported +[arm]: /docs/providers/azurerm/index.html + +The Azure Service Management Provider is used to interact with the many resources supported by Azure. The provider needs to be configured with a [publish settings file](https://manage.windowsazure.com/publishsettings) and optionally a subscription ID before it can be used. diff --git a/website/source/docs/providers/azurerm/index.html.markdown b/website/source/docs/providers/azurerm/index.html.markdown index d669d1b80..ddd54f0ca 100644 --- a/website/source/docs/providers/azurerm/index.html.markdown +++ b/website/source/docs/providers/azurerm/index.html.markdown @@ -6,19 +6,22 @@ description: |- The Azure Resource Manager provider is used to interact with the many resources supported by Azure, via the ARM API. This supercedes the Azure provider, which interacts with Azure using the Service Management API. The provider needs to be configured with a credentials file, or credentials needed to generate OAuth tokens for the ARM API. --- -# Azure Resource Manager Provider +# Microsoft Azure Provider -The Azure Resource Manager provider is used to interact with the many resources -supported by Azure, via the ARM API. This supercedes the Azure provider, which -interacts with Azure using the Service Management API. The provider needs to be -configured with the credentials needed to generate OAuth tokens for the ARM API. +The Microsoft Azure Provider provider is used to interact with the many +resources supported by Azure, via the ARM API. This supercedes the [legacy Azure +provider][asm], which interacts with Azure using the Service Management API. The +provider needs to be configured with the credentials needed to generate OAuth +tokens for the ARM API. + +[asm]: /docs/providers/azure/index.html Use the navigation to the left to read about the available resources. ## Example Usage ``` -# Configure the Azure Resource Manager Provider +# Configure the Microsoft Azure Provider provider "azurerm" { subscription_id = "..." client_id = "..." diff --git a/website/source/layouts/azure.erb b/website/source/layouts/azure.erb index 47356cba4..30db8c685 100644 --- a/website/source/layouts/azure.erb +++ b/website/source/layouts/azure.erb @@ -7,7 +7,7 @@ > - Azure Provider + Azure Service Management Provider > @@ -78,9 +78,21 @@ + +
  • + Microsoft Azure Provider » +
  • <% end %> + + <%= yield %> <% end %> diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb index 9d0f781e4..089fb3dd1 100644 --- a/website/source/layouts/azurerm.erb +++ b/website/source/layouts/azurerm.erb @@ -8,7 +8,7 @@ > - Azure Resource Manager Provider + Microsoft Azure Provider > @@ -198,6 +198,9 @@ +
  • + Azure Service Management Provider » +
  • <% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 287ff0350..20b0b578c 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -170,14 +170,6 @@ AWS - > - Azure (Service Management) - - - > - Azure (Resource Manager) - - > Chef @@ -262,6 +254,14 @@ Mailgun + > + Microsoft Azure + + + > + Microsoft Azure (Legacy ASM) + + > MySQL From 345ddd8978c79418813bfc662c568866159eadde Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 22 Jul 2016 11:41:15 +0100 Subject: [PATCH 0379/1238] docs/azure: Small changes to remove the use of double --- website/source/docs/providers/azure/index.html.markdown | 2 +- website/source/docs/providers/azurerm/index.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown index 6e054b442..fd063556f 100644 --- a/website/source/docs/providers/azure/index.html.markdown +++ b/website/source/docs/providers/azure/index.html.markdown @@ -10,7 +10,7 @@ description: |- [arm]: /docs/providers/azurerm/index.html -The Azure Service Management Provider is used to interact with the many resources supported +The Azure Service Management provider is used to interact with the many resources supported by Azure. The provider needs to be configured with a [publish settings file](https://manage.windowsazure.com/publishsettings) and optionally a subscription ID before it can be used. diff --git a/website/source/docs/providers/azurerm/index.html.markdown b/website/source/docs/providers/azurerm/index.html.markdown index ddd54f0ca..e1f74142a 100644 --- a/website/source/docs/providers/azurerm/index.html.markdown +++ b/website/source/docs/providers/azurerm/index.html.markdown @@ -8,7 +8,7 @@ description: |- # Microsoft Azure Provider -The Microsoft Azure Provider provider is used to interact with the many +The Microsoft Azure provider is used to interact with the many resources supported by Azure, via the ARM API. This supercedes the [legacy Azure provider][asm], which interacts with Azure using the Service Management API. 
The provider needs to be configured with the credentials needed to generate OAuth From 2340d576b12f922320098e276dce5205a9ddf4be Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 22 Jul 2016 13:49:06 +0100 Subject: [PATCH 0380/1238] provider/aws: Rename the ECS Container Data Source test --- .../aws/data_source_aws_ecs_container_definition_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go index c0037e682..461808550 100644 --- a/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go +++ b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/terraform/helper/resource" ) -func TestAccAWSAmiDataSource_ecsContainerDefinition(t *testing.T) { +func TestAccAWSEcsDataSource_ecsContainerDefinition(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, From 9de8a263c0b6d6b8cd816acea92fc627cd01ba28 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 22 Jul 2016 13:51:00 +0100 Subject: [PATCH 0381/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aabf76ea3..d9d360fc8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ FEATURES: * **New Data Source:** `aws_availability_zones` [GH-6805] * **New Data Source:** `aws_iam_policy_document` [GH-6881] * **New Data Source:** `aws_s3_bucket_object` [GH-6946] + * **New Data Source:** `aws_ecs_container_definition` [GH-7230] * **New Data Source:** `atlas_artifact` [GH-7419] * **New Interpolation Function:** `sort` [GH-7128] * **New Interpolation Function:** `distinct` [GH-7174] From 40047902478b544e16d3b084e613f4444fdddb9e Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Fri, 22 Jul 2016 16:01:48 +0200 Subject: [PATCH 0382/1238] Make using `ssl_verify_mode` more robust (#7769) And prettify the template output by removing additions empty lines. 
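As a usage-level sketch (not taken from this patch), the change means a Chef provisioner block can keep specifying `ssl_verify_mode` as a plain string and the provisioner will render it into `client.rb` as a Ruby symbol. The attribute names below mirror the test configuration in this patch; the surrounding instance resource and its connection details are placeholders:

```
resource "aws_instance" "web" {
  # ... instance arguments and connection block elided ...

  provisioner "chef" {
    node_name              = "nodename1"
    run_list               = ["cookbook::recipe"]
    server_url             = "https://chef.local"
    validation_client_name = "validator"
    validation_key_path    = "validator.pem"

    # Rendered into client.rb as `ssl_verify_mode :verify_none`;
    # the leading ":" is now added automatically when it is missing.
    ssl_verify_mode = "verify_none"
  }
}
```

The rendered `client.rb` also loses the stray blank lines between sections, which is what the updated Linux and Windows test fixtures below assert.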
--- .../chef/linux_provisioner_test.go | 12 +++----- .../provisioners/chef/resource_provisioner.go | 30 ++++++++++++------- .../chef/windows_provisioner_test.go | 12 +++----- 3 files changed, 28 insertions(+), 26 deletions(-) diff --git a/builtin/provisioners/chef/linux_provisioner_test.go b/builtin/provisioners/chef/linux_provisioner_test.go index ec72f7deb..c33840583 100644 --- a/builtin/provisioners/chef/linux_provisioner_test.go +++ b/builtin/provisioners/chef/linux_provisioner_test.go @@ -220,6 +220,7 @@ func TestResourceProvider_linuxCreateConfigFiles(t *testing.T) { "run_list": []interface{}{"cookbook::recipe"}, "secret_key_path": "test-fixtures/encrypted_data_bag_secret", "server_url": "https://chef.local", + "ssl_verify_mode": "verify_none", "validation_client_name": "validator", "validation_key_path": "test-fixtures/validator.pem", }), @@ -340,20 +341,15 @@ chef_server_url "https://chef.local" validation_client_name "validator" node_name "nodename1" - - - http_proxy "http://proxy.local" ENV['http_proxy'] = "http://proxy.local" ENV['HTTP_PROXY'] = "http://proxy.local" - - https_proxy "https://proxy.local" ENV['https_proxy'] = "https://proxy.local" ENV['HTTPS_PROXY'] = "https://proxy.local" - - no_proxy "http://local.local,https://local.local" -ENV['no_proxy'] = "http://local.local,https://local.local"` +ENV['no_proxy'] = "http://local.local,https://local.local" + +ssl_verify_mode :verify_none` diff --git a/builtin/provisioners/chef/resource_provisioner.go b/builtin/provisioners/chef/resource_provisioner.go index d4c057529..276c4d3af 100644 --- a/builtin/provisioners/chef/resource_provisioner.go +++ b/builtin/provisioners/chef/resource_provisioner.go @@ -43,35 +43,40 @@ log_location STDOUT chef_server_url "{{ .ServerURL }}" validation_client_name "{{ .ValidationClientName }}" node_name "{{ .NodeName }}" - {{ if .UsePolicyfile }} use_policyfile true policy_group "{{ .PolicyGroup }}" policy_name "{{ .PolicyName }}" -{{ end }} +{{ end -}} {{ if .HTTPProxy }} http_proxy "{{ .HTTPProxy }}" ENV['http_proxy'] = "{{ .HTTPProxy }}" ENV['HTTP_PROXY'] = "{{ .HTTPProxy }}" -{{ end }} +{{ end -}} {{ if .HTTPSProxy }} https_proxy "{{ .HTTPSProxy }}" ENV['https_proxy'] = "{{ .HTTPSProxy }}" ENV['HTTPS_PROXY'] = "{{ .HTTPSProxy }}" -{{ end }} +{{ end -}} {{ if .NOProxy }} no_proxy "{{ join .NOProxy "," }}" ENV['no_proxy'] = "{{ join .NOProxy "," }}" +{{ end -}} + +{{ if .SSLVerifyMode }} +ssl_verify_mode {{ .SSLVerifyMode }} +{{- end -}} + +{{ if .DisableReporting }} +enable_reporting false +{{ end -}} + +{{ if .ClientOptions }} +{{ join .ClientOptions "\n" }} {{ end }} - -{{ if .SSLVerifyMode }}ssl_verify_mode {{ .SSLVerifyMode }}{{ end }} - -{{ if .DisableReporting }}enable_reporting false{{ end }} - -{{ if .ClientOptions }}{{ join .ClientOptions "\n" }}{{ end }} ` // Provisioner represents a specificly configured chef provisioner @@ -452,6 +457,11 @@ func (p *Provisioner) deployConfigFiles( } } + // Make sure the SSLVerifyMode value is written as a symbol + if p.SSLVerifyMode != "" && !strings.HasPrefix(p.SSLVerifyMode, ":") { + p.SSLVerifyMode = fmt.Sprintf(":%s", p.SSLVerifyMode) + } + // Make strings.Join available for use within the template funcMap := template.FuncMap{ "join": strings.Join, diff --git a/builtin/provisioners/chef/windows_provisioner_test.go b/builtin/provisioners/chef/windows_provisioner_test.go index 8dd0dee28..18a9b44d9 100644 --- a/builtin/provisioners/chef/windows_provisioner_test.go +++ b/builtin/provisioners/chef/windows_provisioner_test.go @@ -137,6 +137,7 @@ func 
TestResourceProvider_windowsCreateConfigFiles(t *testing.T) { "run_list": []interface{}{"cookbook::recipe"}, "secret_key_path": "test-fixtures/encrypted_data_bag_secret", "server_url": "https://chef.local", + "ssl_verify_mode": "verify_none", "validation_client_name": "validator", "validation_key_path": "test-fixtures/validator.pem", }), @@ -366,20 +367,15 @@ chef_server_url "https://chef.local" validation_client_name "validator" node_name "nodename1" - - - http_proxy "http://proxy.local" ENV['http_proxy'] = "http://proxy.local" ENV['HTTP_PROXY'] = "http://proxy.local" - - https_proxy "https://proxy.local" ENV['https_proxy'] = "https://proxy.local" ENV['HTTPS_PROXY'] = "https://proxy.local" - - no_proxy "http://local.local,https://local.local" -ENV['no_proxy'] = "http://local.local,https://local.local"` +ENV['no_proxy'] = "http://local.local,https://local.local" + +ssl_verify_mode :verify_none` From 640c3a891fbead844cd1714ade1865294251feea Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 22 Jul 2016 10:22:46 -0400 Subject: [PATCH 0383/1238] Update vendored atlas client --- .../hashicorp/atlas-go/v1/client.go | 19 ++++++++++++++----- vendor/vendor.json | 4 +++- 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/vendor/github.com/hashicorp/atlas-go/v1/client.go b/vendor/github.com/hashicorp/atlas-go/v1/client.go index 2e61e064b..b5ee211a3 100644 --- a/vendor/github.com/hashicorp/atlas-go/v1/client.go +++ b/vendor/github.com/hashicorp/atlas-go/v1/client.go @@ -70,6 +70,10 @@ type Client struct { // HTTPClient is the underlying http client with which to make requests. HTTPClient *http.Client + + // DefaultHeaders is a set of headers that will be added to every request. + // This minimally includes the atlas user-agent string. + DefaultHeader http.Header } // DefaultClient returns a client that connects to the Atlas API. @@ -108,10 +112,13 @@ func NewClient(urlString string) (*Client, error) { } client := &Client{ - URL: parsedURL, - Token: token, + URL: parsedURL, + Token: token, + DefaultHeader: make(http.Header), } + client.DefaultHeader.Set("User-Agent", userAgent) + if err := client.init(); err != nil { return nil, err } @@ -227,10 +234,12 @@ func (c *Client) rawRequest(verb string, u *url.URL, ro *RequestOptions) (*http. return nil, err } - // Set the User-Agent - request.Header.Set("User-Agent", userAgent) + // set our default headers first + for k, v := range c.DefaultHeader { + request.Header[k] = v + } - // Add any headers (auth will be here if set) + // Add any request headers (auth will be here if set) for k, v := range ro.Headers { request.Header.Add(k, v) } diff --git a/vendor/vendor.json b/vendor/vendor.json index 931fedb09..0d225698a 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -920,9 +920,11 @@ "revision": "95fa852edca41c06c4ce526af4bb7dec4eaad434" }, { + "checksumSHA1": "EWGfo74RcoKaYFZNSkvzYRJMgrY=", "comment": "20141209094003-92-g95fa852", "path": "github.com/hashicorp/atlas-go/v1", - "revision": "95fa852edca41c06c4ce526af4bb7dec4eaad434" + "revision": "c8b26aa95f096efc0f378b2d2830ca909631d584", + "revisionTime": "2016-07-22T13:58:36Z" }, { "comment": "v0.6.3-28-g3215b87", From c4e6c14fecbb9bc545fe261de96e344387074cbc Mon Sep 17 00:00:00 2001 From: clint shryock Date: Fri, 22 Jul 2016 10:32:51 -0500 Subject: [PATCH 0384/1238] provider/aws: Restore lost client.simpledbconn initialization It once was lost but now is found! 
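The restored `simpledbconn` field is presumably what the provider's SimpleDB support relies on; as a rough, hypothetical illustration, a configuration as small as the following would fail against a nil client without the initialization restored below:

```
resource "aws_simpledb_domain" "users" {
  name = "users"
}
```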
--- builtin/providers/aws/config.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 8ffef3010..88bf4d0e7 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -243,6 +243,7 @@ func (c *Config) Client() (interface{}, error) { client.r53conn = route53.New(usEast1Sess) client.rdsconn = rds.New(sess) client.redshiftconn = redshift.New(sess) + client.simpledbconn = simpledb.New(sess) client.s3conn = s3.New(sess) client.sesConn = ses.New(sess) client.snsconn = sns.New(sess) From 8c88038647530e1c2981d52098a634405a58b879 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 22 Jul 2016 18:14:30 +0100 Subject: [PATCH 0385/1238] provider/aws: `aws_redshift_cluster` `number_of_nodes` was having the (#7771) wrong value set to state we used `len(ClusterNodes)` rather than NumberOfNodes :) This was picked up by the nightly tests! <3 --- builtin/providers/aws/resource_aws_redshift_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go index 68651c28b..6ca041389 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster.go @@ -373,7 +373,7 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er } else { d.Set("cluster_type", "single-node") } - d.Set("number_of_nodes", len(rsc.ClusterNodes)) + d.Set("number_of_nodes", rsc.NumberOfNodes) d.Set("publicly_accessible", rsc.PubliclyAccessible) var vpcg []string From 9539b259666ce5ac603a8e7da08fe0f5df1ee5f8 Mon Sep 17 00:00:00 2001 From: jonatanblue Date: Sun, 24 Jul 2016 20:14:01 +0100 Subject: [PATCH 0386/1238] Fix broken link to Consul demo (#7789) * fixed broken link * update one more link --- website/source/intro/examples/consul.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/intro/examples/consul.html.markdown b/website/source/intro/examples/consul.html.markdown index 47c896d36..28b6acf05 100644 --- a/website/source/intro/examples/consul.html.markdown +++ b/website/source/intro/examples/consul.html.markdown @@ -25,14 +25,14 @@ and will default to "m1.small" if that key does not exist. Once the instance is the "tf\_test/id" and "tf\_test/public\_dns" keys will be set with the computed values for the instance. -Before we run the example, use the [Web UI](http://demo.consul.io/ui/#/nyc1/kv/) +Before we run the example, use the [Web UI](http://demo.consul.io/ui/#/nyc3/kv/) to set the "tf\_test/size" key to "t1.micro". Once that is done, copy the configuration into a configuration file ("consul.tf" works fine). Either provide the AWS credentials as a default value in the configuration or invoke `apply` with the appropriate variables set. Once the `apply` has completed, we can see the keys in Consul by -visiting the [Web UI](http://demo.consul.io/ui/#/nyc1/kv/). We can see +visiting the [Web UI](http://demo.consul.io/ui/#/nyc3/kv/). We can see that the "tf\_test/id" and "tf\_test/public\_dns" values have been set. 
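For reference (a sketch, not part of the patch), the demo these docs walk through boils down to one `consul_keys` resource that reads the size key and a second one that writes the computed values back; the AMI and the instance details here are placeholders:

```
# Read the instance type from Consul, defaulting to "m1.small" if the key is unset
resource "consul_keys" "input" {
  key {
    name    = "size"
    path    = "tf_test/size"
    default = "m1.small"
  }
}

resource "aws_instance" "web" {
  ami           = "ami-xxxxxxxx" # placeholder
  instance_type = "${consul_keys.input.var.size}"
}

# Write the computed values back so they show up in the Web UI
resource "consul_keys" "output" {
  key {
    name  = "id"
    path  = "tf_test/id"
    value = "${aws_instance.web.id}"
  }

  key {
    name  = "public_dns"
    path  = "tf_test/public_dns"
    value = "${aws_instance.web.public_dns}"
  }
}
```

Splitting the read and write keys into two resources avoids a dependency cycle between the instance and the keys it populates.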
From ad62f090618e3a1ce92b4d6ca165f3881a22c8ee Mon Sep 17 00:00:00 2001 From: David Tolnay Date: Mon, 25 Jul 2016 00:15:03 -0700 Subject: [PATCH 0387/1238] provider/aws: Delete access keys before deleting IAM user (#7766) * provider/aws: Delete access keys before deleting IAM user * provider/aws: Put IAM key removal behind force_destroy option * provider/aws: Move all access key deletion under force_destroy * Add iam_user force_destroy to website * provider/aws: Improve clarity of looping over pages in delete IAM user --- .../providers/aws/resource_aws_iam_user.go | 69 ++++++++++++------- .../providers/aws/r/iam_user.html.markdown | 5 +- 2 files changed, 48 insertions(+), 26 deletions(-) diff --git a/builtin/providers/aws/resource_aws_iam_user.go b/builtin/providers/aws/resource_aws_iam_user.go index 30282031d..7fd3509ef 100644 --- a/builtin/providers/aws/resource_aws_iam_user.go +++ b/builtin/providers/aws/resource_aws_iam_user.go @@ -48,6 +48,12 @@ func resourceAwsIamUser() *schema.Resource { Default: "/", ForceNew: true, }, + "force_destroy": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: "Delete user even if it has non-Terraform-managed IAM access keys", + }, }, } } @@ -132,39 +138,25 @@ func resourceAwsIamUserUpdate(d *schema.ResourceData, meta interface{}) error { } return nil } + func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error { iamconn := meta.(*AWSClient).iamconn // IAM Users must be removed from all groups before they can be deleted var groups []string - var marker *string - truncated := aws.Bool(true) - - for *truncated == true { - listOpts := iam.ListGroupsForUserInput{ - UserName: aws.String(d.Id()), - } - - if marker != nil { - listOpts.Marker = marker - } - - r, err := iamconn.ListGroupsForUser(&listOpts) - if err != nil { - return err - } - - for _, g := range r.Groups { + listGroups := &iam.ListGroupsForUserInput{ + UserName: aws.String(d.Id()), + } + pageOfGroups := func(page *iam.ListGroupsForUserOutput, lastPage bool) (shouldContinue bool) { + for _, g := range page.Groups { groups = append(groups, *g.GroupName) } - - // if there's a marker present, we need to save it for pagination - if r.Marker != nil { - *marker = *r.Marker - } - *truncated = *r.IsTruncated + return !lastPage + } + err := iamconn.ListGroupsForUserPages(listGroups, pageOfGroups) + if err != nil { + return fmt.Errorf("Error removing user %q from all groups: %s", d.Id(), err) } - for _, g := range groups { // use iam group membership func to remove user from all groups log.Printf("[DEBUG] Removing IAM User %s from IAM Group %s", d.Id(), g) @@ -173,6 +165,33 @@ func resourceAwsIamUserDelete(d *schema.ResourceData, meta interface{}) error { } } + // All access keys for the user must be removed + if d.Get("force_destroy").(bool) { + var accessKeys []string + listAccessKeys := &iam.ListAccessKeysInput{ + UserName: aws.String(d.Id()), + } + pageOfAccessKeys := func(page *iam.ListAccessKeysOutput, lastPage bool) (shouldContinue bool) { + for _, k := range page.AccessKeyMetadata { + accessKeys = append(accessKeys, *k.AccessKeyId) + } + return !lastPage + } + err = iamconn.ListAccessKeysPages(listAccessKeys, pageOfAccessKeys) + if err != nil { + return fmt.Errorf("Error removing access keys of user %s: %s", d.Id(), err) + } + for _, k := range accessKeys { + _, err := iamconn.DeleteAccessKey(&iam.DeleteAccessKeyInput{ + UserName: aws.String(d.Id()), + AccessKeyId: aws.String(k), + }) + if err != nil { + return fmt.Errorf("Error deleting 
access key %s: %s", k, err) + } + } + } + request := &iam.DeleteUserInput{ UserName: aws.String(d.Id()), } diff --git a/website/source/docs/providers/aws/r/iam_user.html.markdown b/website/source/docs/providers/aws/r/iam_user.html.markdown index acc79b749..3ec681cd6 100644 --- a/website/source/docs/providers/aws/r/iam_user.html.markdown +++ b/website/source/docs/providers/aws/r/iam_user.html.markdown @@ -48,6 +48,9 @@ The following arguments are supported: * `name` - (Required) The user's name. * `path` - (Optional, default "/") Path in which to create the user. +* `force_destroy` - (Optional, default false) When destroying this user, destroy + even if it has non-Terraform-managed IAM access keys. Without `force_destroy` + a user with non-Terraform-managed access keys will fail to be destroyed. ## Attributes Reference @@ -65,4 +68,4 @@ IAM Users can be imported using the `name`, e.g. ``` $ terraform import aws_iam_user.lb loadbalancer -``` \ No newline at end of file +``` From afeb3f16a9ac72acf85f2d7fa441cad67febe77a Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 25 Jul 2016 08:17:32 +0100 Subject: [PATCH 0388/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9d360fc8..85691cc75 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -88,6 +88,7 @@ IMPROVEMENTS: * core: Support `.` in map keys [GH-7654] * command: Remove second DefaultDataDirectory const [GH-7666] * provider/aws: Add `dns_name` to `aws_efs_mount_target` [GH-7428] + * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user [GH-7766] * provider/aws: Add `option_settings` to `aws_db_option_group` [GH-6560] * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster [GH-6795] * provider/aws: Add support for S3 Bucket Acceleration [GH-6628] From 3c63453e2c35c4bb7132b9f4c41d0672f92e396f Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 11:47:44 +0100 Subject: [PATCH 0389/1238] provider/aws: Bump SDK package version to 1.2.7 (#7799) --- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/service/cloudformation/api.go | 100 ++-- .../service/elastictranscoder/api.go | 467 +++++++++--------- vendor/vendor.json | 400 ++++++++++----- 4 files changed, 562 insertions(+), 407 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 97a3f57f5..438218867 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.2.5" +const SDKVersion = "1.2.7" diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go index 5ec08091c..432c6e93e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudformation/api.go @@ -1606,14 +1606,14 @@ func (s ContinueUpdateRollbackOutput) GoString() string { type CreateChangeSetInput struct { _ struct{} `type:"structure"` - // A list of capabilities that you must specify before AWS CloudFormation can - // update certain stacks. Some stack templates might include resources that - // can affect permissions in your AWS account, for example, by creating new - // AWS Identity and Access Management (IAM) users. 
For those stacks, you must - // explicitly acknowledge their capabilities by specifying this parameter. + // A list of values that you must specify before AWS CloudFormation can update + // certain stacks. Some stack templates might include resources that can affect + // permissions in your AWS account, for example, by creating new AWS Identity + // and Access Management (IAM) users. For those stacks, you must explicitly + // acknowledge their capabilities by specifying this parameter. // - // Currently, the only valid value is CAPABILITY_IAM, which is required for - // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following + // resources require you to specify this parameter: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), @@ -1622,8 +1622,14 @@ type CreateChangeSetInput struct { // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). // If your stack template contains these resources, we recommend that you review // all permissions associated with them and edit their permissions if necessary. - // If your template contains any of the listed resources and you don't specify - // this parameter, this action returns an InsufficientCapabilities error. + // + // If you have IAM resources, you can specify either capability. If you have + // IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If + // you don't specify this parameter, this action returns an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). Capabilities []*string `type:"list"` // The name of the change set. The name must be unique among all change sets @@ -1763,14 +1769,14 @@ func (s CreateChangeSetOutput) GoString() string { type CreateStackInput struct { _ struct{} `type:"structure"` - // A list of capabilities that you must specify before AWS CloudFormation can - // create certain stacks. Some stack templates might include resources that - // can affect permissions in your AWS account, for example, by creating new - // AWS Identity and Access Management (IAM) users. For those stacks, you must - // explicitly acknowledge their capabilities by specifying this parameter. + // A list of values that you must specify before AWS CloudFormation can create + // certain stacks. Some stack templates might include resources that can affect + // permissions in your AWS account, for example, by creating new AWS Identity + // and Access Management (IAM) users. For those stacks, you must explicitly + // acknowledge their capabilities by specifying this parameter. 
// - // Currently, the only valid value is CAPABILITY_IAM, which is required for - // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following + // resources require you to specify this parameter: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), @@ -1779,8 +1785,14 @@ type CreateStackInput struct { // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). // If your stack template contains these resources, we recommend that you review // all permissions associated with them and edit their permissions if necessary. - // If your template contains any of the listed resources and you don't specify - // this parameter, this action returns an InsufficientCapabilities error. + // + // If you have IAM resources, you can specify either capability. If you have + // IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If + // you don't specify this parameter, this action returns an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). Capabilities []*string `type:"list"` // Set to true to disable rollback of the stack if stack creation failed. You @@ -2778,11 +2790,14 @@ func (s *GetTemplateSummaryInput) Validate() error { type GetTemplateSummaryOutput struct { _ struct{} `type:"structure"` - // The capabilities found within the template. Currently, AWS CloudFormation - // supports only the CAPABILITY_IAM capability. If your template contains IAM - // resources, you must specify the CAPABILITY_IAM value for this parameter when - // you use the CreateStack or UpdateStack actions with your template; otherwise, - // those actions return an InsufficientCapabilities error. + // The capabilities found within the template. If your template contains IAM + // resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value + // for this parameter when you use the CreateStack or UpdateStack actions with + // your template; otherwise, those actions return an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). Capabilities []*string `type:"list"` // The list of resources that generated the values in the Capabilities response @@ -3744,14 +3759,14 @@ func (s TemplateParameter) GoString() string { type UpdateStackInput struct { _ struct{} `type:"structure"` - // A list of capabilities that you must specify before AWS CloudFormation can - // update certain stacks. Some stack templates might include resources that - // can affect permissions in your AWS account, for example, by creating new - // AWS Identity and Access Management (IAM) users. 
For those stacks, you must - // explicitly acknowledge their capabilities by specifying this parameter. + // A list of values that you must specify before AWS CloudFormation can update + // certain stacks. Some stack templates might include resources that can affect + // permissions in your AWS account, for example, by creating new AWS Identity + // and Access Management (IAM) users. For those stacks, you must explicitly + // acknowledge their capabilities by specifying this parameter. // - // Currently, the only valid value is CAPABILITY_IAM, which is required for - // the following resources: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), + // The only valid values are CAPABILITY_IAM and CAPABILITY_NAMED_IAM. The following + // resources require you to specify this parameter: AWS::IAM::AccessKey (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-accesskey.html), // AWS::IAM::Group (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-group.html), // AWS::IAM::InstanceProfile (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-iam-instanceprofile.html), // AWS::IAM::Policy (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-policy.html), @@ -3760,8 +3775,14 @@ type UpdateStackInput struct { // and AWS::IAM::UserToGroupAddition (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-iam-addusertogroup.html). // If your stack template contains these resources, we recommend that you review // all permissions associated with them and edit their permissions if necessary. - // If your template contains any of the listed resources and you don't specify - // this parameter, this action returns an InsufficientCapabilities error. + // + // If you have IAM resources, you can specify either capability. If you have + // IAM resources with custom names, you must specify CAPABILITY_NAMED_IAM. If + // you don't specify this parameter, this action returns an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). Capabilities []*string `type:"list"` // Amazon Simple Notification Service topic Amazon Resource Names (ARNs) that @@ -3969,11 +3990,14 @@ func (s *ValidateTemplateInput) Validate() error { type ValidateTemplateOutput struct { _ struct{} `type:"structure"` - // The capabilities found within the template. Currently, AWS CloudFormation - // supports only the CAPABILITY_IAM capability. If your template contains IAM - // resources, you must specify the CAPABILITY_IAM value for this parameter when - // you use the CreateStack or UpdateStack actions with your template; otherwise, - // those actions return an InsufficientCapabilities error. + // The capabilities found within the template. If your template contains IAM + // resources, you must specify the CAPABILITY_IAM or CAPABILITY_NAMED_IAM value + // for this parameter when you use the CreateStack or UpdateStack actions with + // your template; otherwise, those actions return an InsufficientCapabilities + // error. + // + // For more information, see Acknowledging IAM Resources in AWS CloudFormation + // Templates (http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-iam-template.html#capabilities). 
Capabilities []*string `type:"list"` // The list of resources that generated the values in the Capabilities response @@ -4000,6 +4024,8 @@ func (s ValidateTemplateOutput) GoString() string { const ( // @enum Capability CapabilityCapabilityIam = "CAPABILITY_IAM" + // @enum Capability + CapabilityCapabilityNamedIam = "CAPABILITY_NAMED_IAM" ) const ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go index af931b4f1..2d0685ddf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elastictranscoder/api.go @@ -1056,22 +1056,22 @@ type Artwork struct { // Specify one of the following values to control scaling of the output album // art: // - // Fit: Elastic Transcoder scales the output art so it matches the value + // Fit: Elastic Transcoder scales the output art so it matches the value // that you specified in either MaxWidth or MaxHeight without exceeding the - // other value. Fill: Elastic Transcoder scales the output art so it matches + // other value. Fill: Elastic Transcoder scales the output art so it matches // the value that you specified in either MaxWidth or MaxHeight and matches // or exceeds the other value. Elastic Transcoder centers the output art and // then crops it in the dimension (if any) that exceeds the maximum value. - // Stretch: Elastic Transcoder stretches the output art to match the values + // Stretch: Elastic Transcoder stretches the output art to match the values // that you specified for MaxWidth and MaxHeight. If the relative proportions // of the input art and the output art are different, the output art will be - // distorted. Keep: Elastic Transcoder does not scale the output art. If either + // distorted. Keep: Elastic Transcoder does not scale the output art. If either // dimension of the input art exceeds the values that you specified for MaxWidth - // and MaxHeight, Elastic Transcoder crops the output art. ShrinkToFit: Elastic + // and MaxHeight, Elastic Transcoder crops the output art. ShrinkToFit: Elastic // Transcoder scales the output art down so that its dimensions match the values // that you specified for at least one of MaxWidth and MaxHeight without exceeding // either value. If you specify this option, Elastic Transcoder does not scale - // the art up. ShrinkToFill Elastic Transcoder scales the output art down so + // the art up. ShrinkToFill Elastic Transcoder scales the output art down so // that its dimensions match the values that you specified for at least one // of MaxWidth and MaxHeight without dropping below either value. If you specify // this option, Elastic Transcoder does not scale the art up. @@ -1131,11 +1131,11 @@ type AudioCodecOptions struct { // Specify the AAC profile for the output file. Elastic Transcoder supports // the following profiles: // - // auto: If you specify auto, Elastic Transcoder will select the profile - // based on the bit rate selected for the output file. AAC-LC: The most common - // AAC profile. Use for bit rates larger than 64 kbps. HE-AAC: Not supported - // on some older players and devices. Use for bit rates between 40 and 80 kbps. - // HE-AACv2: Not supported on some players and devices. Use for bit rates less + // auto: If you specify auto, Elastic Transcoder will select the profile based + // on the bit rate selected for the output file. AAC-LC: The most common AAC + // profile. Use for bit rates larger than 64 kbps. 
HE-AAC: Not supported on + // some older players and devices. Use for bit rates between 40 and 80 kbps. + // HE-AACv2: Not supported on some players and devices. Use for bit rates less // than 48 kbps. All outputs in a Smooth playlist must have the same value // for Profile. // @@ -1184,15 +1184,15 @@ type AudioParameters struct { // The outputs of SingleTrack for a specific channel value and inputs are as // follows: // - // 0 channels with any input: Audio omitted from the output 1, 2, or auto - // channels with no audio input: Audio omitted from the output 1 channel - // with any input with audio: One track with one channel, downmixed if necessary - // 2 channels with one track with one channel: One track with two identical - // channels 2 or auto channels with two tracks with one channel each: One - // track with two channels 2 or auto channels with one track with two channels: - // One track with two channels 2 channels with one track with multiple channels: - // One track with two channels auto channels with one track with one channel: - // One track with one channel auto channels with one track with multiple channels: + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel with + // any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: One track with two identical + // channels 2 or auto channels with two tracks with one channel each: One track + // with two channels 2 or auto channels with one track with two channels: One + // track with two channels 2 channels with one track with multiple channels: + // One track with two channels auto channels with one track with one channel: + // One track with one channel auto channels with one track with multiple channels: // One track with multiple channels When you specify OneChannelPerTrack, Elastic // Transcoder creates a new track for every channel in your output. Your output // can have up to eight single-channel tracks. @@ -1200,34 +1200,34 @@ type AudioParameters struct { // The outputs of OneChannelPerTrack for a specific channel value and inputs // are as follows: // - // 0 channels with any input: Audio omitted from the output 1, 2, or auto - // channels with no audio input: Audio omitted from the output 1 channel - // with any input with audio: One track with one channel, downmixed if necessary - // 2 channels with one track with one channel: Two tracks with one identical - // channel each 2 or auto channels with two tracks with one channel each: - // Two tracks with one channel each 2 or auto channels with one track with - // two channels: Two tracks with one channel each 2 channels with one track - // with multiple channels: Two tracks with one channel each auto channels - // with one track with one channel: One track with one channel auto channels - // with one track with multiple channels: Up to eight tracks with one channel - // each When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder - // creates eight single-channel tracks for your output. All tracks that do not - // contain audio data from an input channel are MOS, or Mit Out Sound, tracks. 
+ // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel with + // any input with audio: One track with one channel, downmixed if necessary + // 2 channels with one track with one channel: Two tracks with one identical + // channel each 2 or auto channels with two tracks with one channel each: Two + // tracks with one channel each 2 or auto channels with one track with two + // channels: Two tracks with one channel each 2 channels with one track with + // multiple channels: Two tracks with one channel each auto channels with one + // track with one channel: One track with one channel auto channels with one + // track with multiple channels: Up to eight tracks with one channel each When + // you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder creates + // eight single-channel tracks for your output. All tracks that do not contain + // audio data from an input channel are MOS, or Mit Out Sound, tracks. // // The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel // value and inputs are as follows: // - // 0 channels with any input: Audio omitted from the output 1, 2, or auto - // channels with no audio input: Audio omitted from the output 1 channel - // with any input with audio: One track with one channel, downmixed if necessary, - // plus six MOS tracks 2 channels with one track with one channel: Two tracks - // with one identical channel each, plus six MOS tracks 2 or auto channels + // 0 channels with any input: Audio omitted from the output 1, 2, or auto + // channels with no audio input: Audio omitted from the output 1 channel with + // any input with audio: One track with one channel, downmixed if necessary, + // plus six MOS tracks 2 channels with one track with one channel: Two tracks + // with one identical channel each, plus six MOS tracks 2 or auto channels // with two tracks with one channel each: Two tracks with one channel each, - // plus six MOS tracks 2 or auto channels with one track with two channels: - // Two tracks with one channel each, plus six MOS tracks 2 channels with one + // plus six MOS tracks 2 or auto channels with one track with two channels: + // Two tracks with one channel each, plus six MOS tracks 2 channels with one // track with multiple channels: Two tracks with one channel each, plus six - // MOS tracks auto channels with one track with one channel: One track with - // one channel, plus seven MOS tracks auto channels with one track with multiple + // MOS tracks auto channels with one track with one channel: One track with + // one channel, plus seven MOS tracks auto channels with one track with multiple // channels: Up to eight tracks with one channel each, plus MOS tracks until // there are eight tracks in all AudioPackingMode *string `type:"string"` @@ -1250,11 +1250,11 @@ type AudioParameters struct { // // The output of a specific channel value and inputs are as follows: // - // auto channel specified, with any input: Pass through up to eight input - // channels. 0 channels specified, with any input: Audio omitted from the output. - // 1 channel specified, with at least one input channel: Mono sound. 2 channels + // auto channel specified, with any input: Pass through up to eight input + // channels. 0 channels specified, with any input: Audio omitted from the output. + // 1 channel specified, with at least one input channel: Mono sound. 2 channels // specified, with any input: Two identical mono channels or stereo. 
For more - // information about tracks, see Audio:AudioPackingMode. For more information + // information about tracks, see Audio:AudioPackingMode. For more information // about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode. Channels *string `type:"string"` @@ -1353,7 +1353,7 @@ type CaptionFormat struct { // The format you specify determines whether Elastic Transcoder generates an // embedded or sidecar caption for this output. // - // Valid Embedded Caption Formats: + // Valid Embedded Caption Formats: // // for FLAC: None // @@ -1367,7 +1367,7 @@ type CaptionFormat struct { // // For webm: None // - // Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first + // Valid Sidecar Caption Formats: Elastic Transcoder supports dfxp (first // div element only), scc, srt, and webvtt. If you want ttml or smpte-tt compatible // captions, specify dfxp as your output format. // @@ -1380,7 +1380,7 @@ type CaptionFormat struct { // The prefix for caption filenames, in the form description-{language}, where: // - // description is a description of the video. {language} is a literal value + // description is a description of the video. {language} is a literal value // that Elastic Transcoder replaces with the two- or three-letter code for the // language of the caption in the output file names. If you don't include {language} // in the file name pattern, Elastic Transcoder automatically appends "{language}" @@ -1665,7 +1665,7 @@ type CreateJobOutput struct { // from one format to another. All captions must be in UTF-8. Elastic Transcoder // supports two types of captions: // - // Embedded: Embedded captions are included in the same file as the audio + // Embedded: Embedded captions are included in the same file as the audio // and video. Elastic Transcoder supports only one embedded caption per language, // to a maximum of 300 embedded captions per file. // @@ -1676,7 +1676,7 @@ type CreateJobOutput struct { // // Elastic Transcoder supports a maximum of one embedded format per output. // - // Sidecar: Sidecar captions are kept in a separate metadata file from the + // Sidecar: Sidecar captions are kept in a separate metadata file from the // audio and video data. Sidecar captions require a player that is capable of // understanding the relationship between the video file and the sidecar file. // Elastic Transcoder supports only one sidecar caption per language, to a maximum @@ -1687,7 +1687,7 @@ type CreateJobOutput struct { // // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. // - // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // If you want ttml or smpte-tt compatible captions, specify dfxp as your // output format. // // Elastic Transcoder does not support OCR (Optical Character Recognition), @@ -1978,21 +1978,21 @@ type CreatePipelineInput struct { // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket // object. // - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // transcoded files and playlists. Permissions (Optional): The Permissions - // object specifies which users you want to have access to transcoded files - // and the type of access you want them to have. You can grant permissions to - // a maximum of 30 users and/or predefined Amazon S3 groups. 
Grantee Type: - // Specify the type of value that appears in the Grantee object: Canonical: - // The value in the Grantee object is either the canonical user ID for an AWS - // account or an origin access identity for an Amazon CloudFront distribution. - // For more information about canonical user IDs, see Access Control List (ACL) - // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions object + // specifies which users you want to have access to transcoded files and the + // type of access you want them to have. You can grant permissions to a maximum + // of 30 users and/or predefined Amazon S3 groups. Grantee Type: Specify the + // type of value that appears in the Grantee object: Canonical: The value in + // the Grantee object is either the canonical user ID for an AWS account or + // an origin access identity for an Amazon CloudFront distribution. For more + // information about canonical user IDs, see Access Control List (ACL) Overview + // in the Amazon Simple Storage Service Developer Guide. For more information // about using CloudFront origin access identities to require that users use // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not - // the same as an AWS account number. Email: The value in the Grantee object - // is the registered email address of an AWS account. Group: The value in the + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. Group: The value in the // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, // AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or group that // you want to have access to transcoded files and playlists. To identify the @@ -2001,12 +2001,12 @@ type CreatePipelineInput struct { // address of an AWS account, or a predefined Amazon S3 group Access: The // permission that you want to give to the AWS user that you specified in Grantee. // Permissions are granted on the files that Elastic Transcoder adds to the - // bucket, including playlists and video files. Valid values include: READ: + // bucket, including playlists and video files. Valid values include: READ: // The grantee can read the objects and metadata for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object - // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL + // for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: // The grantee can write the ACL for the objects that Elastic Transcoder adds - // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video @@ -2027,17 +2027,17 @@ type CreatePipelineInput struct { // notify to report job status. // // To receive notifications, you must also subscribe to the new topic in the - // Amazon SNS console. 
Progressing: The topic ARN for the Amazon Simple Notification + // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder // has started to process a job in this pipeline. This is the ARN that Amazon // SNS returned when you created the topic. For more information, see Create - // a Topic in the Amazon Simple Notification Service Developer Guide. Completed: + // a Topic in the Amazon Simple Notification Service Developer Guide. Completed: // The topic ARN for the Amazon SNS topic that you want to notify when Elastic // Transcoder has finished processing a job in this pipeline. This is the ARN - // that Amazon SNS returned when you created the topic. Warning: The topic - // ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // that Amazon SNS returned when you created the topic. Warning: The topic ARN + // for the Amazon SNS topic that you want to notify when Elastic Transcoder // encounters a warning condition while processing a job in this pipeline. This - // is the ARN that Amazon SNS returned when you created the topic. Error: The + // is the ARN that Amazon SNS returned when you created the topic. Error: The // topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder // encounters an error condition while processing a job in this pipeline. This // is the ARN that Amazon SNS returned when you created the topic. @@ -2052,7 +2052,7 @@ type CreatePipelineInput struct { // to the transcoded files, thumbnails, and playlists. You do not want to specify // the permissions that Elastic Transcoder grants to the files. When Elastic // Transcoder saves files in OutputBucket, it grants full control over the files - // only to the AWS account that owns the role that is specified by Role. You + // only to the AWS account that owns the role that is specified by Role. You // want to associate the transcoded files and thumbnails with the Amazon S3 // Standard storage class. // @@ -2077,8 +2077,8 @@ type CreatePipelineInput struct { // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket // object. // - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // thumbnail files. Permissions (Optional): The Permissions object specifies + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions (Optional): The Permissions object specifies // which users and/or predefined Amazon S3 groups you want to have access to // thumbnail files, and the type of access you want them to have. You can grant // permissions to a maximum of 30 users and/or predefined Amazon S3 groups. @@ -2086,25 +2086,25 @@ type CreatePipelineInput struct { // Canonical: The value in the Grantee object is either the canonical user // ID for an AWS account or an origin access identity for an Amazon CloudFront // distribution. A canonical user ID is not the same as an AWS account number. - // Email: The value in the Grantee object is the registered email address - // of an AWS account. Group: The value in the Grantee object is one of the - // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Email: The value in the Grantee object is the registered email address of + // an AWS account. Group: The value in the Grantee object is one of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. 
// Grantee: The AWS user or group that you want to have access to thumbnail // files. To identify the user or group, you can specify the canonical user // ID for an AWS account, an origin access identity for a CloudFront distribution, // the registered email address of an AWS account, or a predefined Amazon S3 // group. Access: The permission that you want to give to the AWS user that // you specified in Grantee. Permissions are granted on the thumbnail files - // that Elastic Transcoder adds to the bucket. Valid values include: READ: + // that Elastic Transcoder adds to the bucket. Valid values include: READ: // The grantee can read the thumbnails and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read - // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon - // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails - // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The - // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails - // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: The - // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic - // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read the + // object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic + // Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, + // READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. StorageClass: The Amazon S3 storage class, + // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign + // to the thumbnails that it stores in your Amazon S3 bucket. ThumbnailConfig *PipelineOutputConfig `type:"structure"` } @@ -2623,10 +2623,10 @@ type JobAlbumArt struct { // A policy that determines how Elastic Transcoder will handle the existence // of multiple album artwork files. // - // Replace: The specified album art will replace any existing album art. - // Prepend: The specified album art will be placed in front of any existing - // album art. Append: The specified album art will be placed after any existing - // album art. Fallback: If the original input file contains artwork, Elastic + // Replace: The specified album art will replace any existing album art. + // Prepend: The specified album art will be placed in front of any existing + // album art. Append: The specified album art will be placed after any existing + // album art. Fallback: If the original input file contains artwork, Elastic // Transcoder will use that artwork for the output. If the original input does // not contain artwork, Elastic Transcoder will use the specified album art // file. @@ -2773,7 +2773,7 @@ type JobOutput struct { // from one format to another. All captions must be in UTF-8. Elastic Transcoder // supports two types of captions: // - // Embedded: Embedded captions are included in the same file as the audio + // Embedded: Embedded captions are included in the same file as the audio // and video. Elastic Transcoder supports only one embedded caption per language, // to a maximum of 300 embedded captions per file. // @@ -2784,7 +2784,7 @@ type JobOutput struct { // // Elastic Transcoder supports a maximum of one embedded format per output. 
// - // Sidecar: Sidecar captions are kept in a separate metadata file from the + // Sidecar: Sidecar captions are kept in a separate metadata file from the // audio and video data. Sidecar captions require a player that is capable of // understanding the relationship between the video file and the sidecar file. // Elastic Transcoder supports only one sidecar caption per language, to a maximum @@ -2795,7 +2795,7 @@ type JobOutput struct { // // Valid outputs include: dfxp (first div element only), scc, srt, and webvtt. // - // If you want ttml or smpte-tt compatible captions, specify dfxp as your + // If you want ttml or smpte-tt compatible captions, specify dfxp as your // output format. // // Elastic Transcoder does not support OCR (Optical Character Recognition), @@ -2888,7 +2888,7 @@ type JobOutput struct { // The status of one output in a job. If you specified only one output for the // job, Outputs:Status is always the same as Job:Status. If you specified more - // than one output: Job:Status and Outputs:Status for all of the outputs is + // than one output: Job:Status and Outputs:Status for all of the outputs is // Submitted until Elastic Transcoder starts to process the first output. When // Elastic Transcoder starts to process the first output, Outputs:Status for // that output and Job:Status both change to Progressing. For each output, the @@ -3280,11 +3280,11 @@ type Permission struct { _ struct{} `type:"structure"` // The permission that you want to give to the AWS user that is listed in Grantee. - // Valid values include: READ: The grantee can read the thumbnails and metadata - // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // Valid values include: READ: The grantee can read the thumbnails and metadata + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: // The grantee can read the object ACL for thumbnails that Elastic Transcoder - // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for - // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for + // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails // that Elastic Transcoder adds to the Amazon S3 bucket. Access []*string `type:"list"` @@ -3296,11 +3296,11 @@ type Permission struct { // group. Grantee *string `min:"1" type:"string"` - // The type of value that appears in the Grantee object: Canonical: Either + // The type of value that appears in the Grantee object: Canonical: Either // the canonical user ID for an AWS account or an origin access identity for // an Amazon CloudFront distribution. A canonical user ID is not the same as - // an AWS account number. Email: The registered email address of an AWS account. - // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // an AWS account number. Email: The registered email address of an AWS account. + // Group: One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, // or LogDelivery. GranteeType *string `type:"string"` } @@ -3349,27 +3349,26 @@ type Pipeline struct { // to save transcoded files and playlists. Either you specify both ContentConfig // and ThumbnailConfig, or you specify OutputBucket. 
// - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // transcoded files and playlists. Permissions: A list of the users and/or - // predefined Amazon S3 groups you want to have access to transcoded files and - // playlists, and the type of access that you want them to have. GranteeType: - // The type of value that appears in the Grantee object: Canonical: Either - // the canonical user ID for an AWS account or an origin access identity for - // an Amazon CloudFront distribution. Email: The registered email address of - // an AWS account. Group: One of the following predefined Amazon S3 groups: - // AllUsers, AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or - // group that you want to have access to transcoded files and playlists. Access: - // The permission that you want to give to the AWS user that is listed in Grantee. - // Valid values include: READ: The grantee can read the objects and metadata - // for objects that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: - // The grantee can read the object ACL for objects that Elastic Transcoder adds - // to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for the - // objects that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: - // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the objects - // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: - // The Amazon S3 storage class, Standard or ReducedRedundancy, that you want - // Elastic Transcoder to assign to the video files and playlists that it stores - // in your Amazon S3 bucket. + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions: A list of the users and/or predefined + // Amazon S3 groups you want to have access to transcoded files and playlists, + // and the type of access that you want them to have. GranteeType: The type + // of value that appears in the Grantee object: Canonical: Either the canonical + // user ID for an AWS account or an origin access identity for an Amazon CloudFront + // distribution. Email: The registered email address of an AWS account. Group: + // One of the following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, + // or LogDelivery. Grantee: The AWS user or group that you want to have access + // to transcoded files and playlists. Access: The permission that you want to + // give to the AWS user that is listed in Grantee. Valid values include: READ: + // The grantee can read the objects and metadata for objects that Elastic Transcoder + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL + // for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // The grantee can write the ACL for the objects that Elastic Transcoder adds + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the + // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard + // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video + // files and playlists that it stores in your Amazon S3 bucket. ContentConfig *PipelineOutputConfig `type:"structure"` // The identifier for the pipeline. You use this value to identify the pipeline @@ -3391,12 +3390,12 @@ type Pipeline struct { // notify to report job status. 
// // To receive notifications, you must also subscribe to the new topic in the - // Amazon SNS console. Progressing (optional): The Amazon Simple Notification + // Amazon SNS console. Progressing (optional): The Amazon Simple Notification // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder - // has started to process the job. Completed (optional): The Amazon SNS topic + // has started to process the job. Completed (optional): The Amazon SNS topic // that you want to notify when Elastic Transcoder has finished processing the - // job. Warning (optional): The Amazon SNS topic that you want to notify when - // Elastic Transcoder encounters a warning condition. Error (optional): The + // job. Warning (optional): The Amazon SNS topic that you want to notify when + // Elastic Transcoder encounters a warning condition. Error (optional): The // Amazon SNS topic that you want to notify when Elastic Transcoder encounters // an error condition. Notifications *Notifications `type:"structure"` @@ -3412,35 +3411,34 @@ type Pipeline struct { // The current status of the pipeline: // - // Active: The pipeline is processing jobs. Paused: The pipeline is not - // currently processing jobs. + // Active: The pipeline is processing jobs. Paused: The pipeline is not currently + // processing jobs. Status *string `type:"string"` // Information about the Amazon S3 bucket in which you want Elastic Transcoder // to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, // or you specify OutputBucket. // - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // thumbnail files. Permissions: A list of the users and/or predefined Amazon + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions: A list of the users and/or predefined Amazon // S3 groups you want to have access to thumbnail files, and the type of access // that you want them to have. GranteeType: The type of value that appears - // in the Grantee object: Canonical: Either the canonical user ID for an AWS + // in the Grantee object: Canonical: Either the canonical user ID for an AWS // account or an origin access identity for an Amazon CloudFront distribution. - // A canonical user ID is not the same as an AWS account number. Email: The - // registered email address of an AWS account. Group: One of the following - // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. - // Grantee: The AWS user or group that you want to have access to thumbnail - // files. Access: The permission that you want to give to the AWS user that - // is listed in Grantee. Valid values include: READ: The grantee can read - // the thumbnails and metadata for thumbnails that Elastic Transcoder adds to - // the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL for - // thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: - // The grantee can write the ACL for the thumbnails that Elastic Transcoder - // adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, - // and WRITE_ACP permissions for the thumbnails that Elastic Transcoder adds - // to the Amazon S3 bucket. StorageClass: The Amazon S3 storage class, - // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign - // to the thumbnails that it stores in your Amazon S3 bucket. + // A canonical user ID is not the same as an AWS account number. Email: The + // registered email address of an AWS account. 
Group: One of the following predefined + // Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. Grantee: + // The AWS user or group that you want to have access to thumbnail files. Access: + // The permission that you want to give to the AWS user that is listed in Grantee. + // Valid values include: READ: The grantee can read the thumbnails and metadata + // for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. READ_ACP: + // The grantee can read the object ACL for thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. WRITE_ACP: The grantee can write the ACL for + // the thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: + // The grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails + // that Elastic Transcoder adds to the Amazon S3 bucket. StorageClass: The + // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic + // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. ThumbnailConfig *PipelineOutputConfig `type:"structure"` } @@ -3757,10 +3755,10 @@ type PresetWatermark struct { _ struct{} `type:"structure"` // The horizontal position of the watermark unless you specify a non-zero value - // for HorizontalOffset: Left: The left edge of the watermark is aligned with - // the left border of the video. Right: The right edge of the watermark is - // aligned with the right border of the video. Center: The watermark is centered - // between the left and right borders. + // for HorizontalOffset: Left: The left edge of the watermark is aligned with + // the left border of the video. Right: The right edge of the watermark is aligned + // with the right border of the video. Center: The watermark is centered between + // the left and right borders. HorizontalAlign *string `type:"string"` // The amount by which you want the horizontal position of the watermark to @@ -3814,25 +3812,25 @@ type PresetWatermark struct { // a value of 0 for Opacity. The .jpg file format doesn't support transparency. Opacity *string `type:"string"` - // A value that controls scaling of the watermark: Fit: Elastic Transcoder + // A value that controls scaling of the watermark: Fit: Elastic Transcoder // scales the watermark so it matches the value that you specified in either - // MaxWidth or MaxHeight without exceeding the other value. Stretch: Elastic + // MaxWidth or MaxHeight without exceeding the other value. Stretch: Elastic // Transcoder stretches the watermark to match the values that you specified // for MaxWidth and MaxHeight. If the relative proportions of the watermark // and the values of MaxWidth and MaxHeight are different, the watermark will - // be distorted. ShrinkToFit: Elastic Transcoder scales the watermark down - // so that its dimensions match the values that you specified for at least one + // be distorted. ShrinkToFit: Elastic Transcoder scales the watermark down so + // that its dimensions match the values that you specified for at least one // of MaxWidth and MaxHeight without exceeding either value. If you specify // this option, Elastic Transcoder does not scale the watermark up. 
SizingPolicy *string `type:"string"` // A value that determines how Elastic Transcoder interprets values that you // specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight: - // Content: HorizontalOffset and VerticalOffset values are calculated based + // Content: HorizontalOffset and VerticalOffset values are calculated based // on the borders of the video excluding black bars added by Elastic Transcoder, // if any. In addition, MaxWidth and MaxHeight, if specified as a percentage, // are calculated based on the borders of the video excluding black bars added - // by Elastic Transcoder, if any. Frame: HorizontalOffset and VerticalOffset + // by Elastic Transcoder, if any. Frame: HorizontalOffset and VerticalOffset // values are calculated based on the borders of the video including black bars // added by Elastic Transcoder, if any. In addition, MaxWidth and MaxHeight, // if specified as a percentage, are calculated based on the borders of the @@ -3840,10 +3838,10 @@ type PresetWatermark struct { Target *string `type:"string"` // The vertical position of the watermark unless you specify a non-zero value - // for VerticalOffset: Top: The top edge of the watermark is aligned with - // the top border of the video. Bottom: The bottom edge of the watermark is - // aligned with the bottom border of the video. Center: The watermark is centered - // between the top and bottom borders. + // for VerticalOffset: Top: The top edge of the watermark is aligned with the + // top border of the video. Bottom: The bottom edge of the watermark is aligned + // with the bottom border of the video. Center: The watermark is centered between + // the top and bottom borders. VerticalAlign *string `type:"string"` // VerticalOffset The amount by which you want the vertical position of the @@ -4177,22 +4175,22 @@ type Thumbnails struct { // Specify one of the following values to control scaling of thumbnails: // - // Fit: Elastic Transcoder scales thumbnails so they match the value that + // Fit: Elastic Transcoder scales thumbnails so they match the value that // you specified in thumbnail MaxWidth or MaxHeight settings without exceeding - // the other value. Fill: Elastic Transcoder scales thumbnails so they match + // the other value. Fill: Elastic Transcoder scales thumbnails so they match // the value that you specified in thumbnail MaxWidth or MaxHeight settings // and matches or exceeds the other value. Elastic Transcoder centers the image // in thumbnails and then crops in the dimension (if any) that exceeds the maximum - // value. Stretch: Elastic Transcoder stretches thumbnails to match the values + // value. Stretch: Elastic Transcoder stretches thumbnails to match the values // that you specified for thumbnail MaxWidth and MaxHeight settings. If the // relative proportions of the input video and thumbnails are different, the - // thumbnails will be distorted. Keep: Elastic Transcoder does not scale thumbnails. + // thumbnails will be distorted. Keep: Elastic Transcoder does not scale thumbnails. // If either dimension of the input video exceeds the values that you specified // for thumbnail MaxWidth and MaxHeight settings, Elastic Transcoder crops the - // thumbnails. ShrinkToFit: Elastic Transcoder scales thumbnails down so that + // thumbnails. ShrinkToFit: Elastic Transcoder scales thumbnails down so that // their dimensions match the values that you specified for at least one of // thumbnail MaxWidth and MaxHeight without exceeding either value. 
If you specify - // this option, Elastic Transcoder does not scale thumbnails up. ShrinkToFill: + // this option, Elastic Transcoder does not scale thumbnails up. ShrinkToFill: // Elastic Transcoder scales thumbnails down so that their dimensions match // the values that you specified for at least one of MaxWidth and MaxHeight // without dropping below either value. If you specify this option, Elastic @@ -4290,21 +4288,21 @@ type UpdatePipelineInput struct { // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket // object. // - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // transcoded files and playlists. Permissions (Optional): The Permissions - // object specifies which users you want to have access to transcoded files - // and the type of access you want them to have. You can grant permissions to - // a maximum of 30 users and/or predefined Amazon S3 groups. Grantee Type: - // Specify the type of value that appears in the Grantee object: Canonical: - // The value in the Grantee object is either the canonical user ID for an AWS - // account or an origin access identity for an Amazon CloudFront distribution. - // For more information about canonical user IDs, see Access Control List (ACL) - // Overview in the Amazon Simple Storage Service Developer Guide. For more information + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // transcoded files and playlists. Permissions (Optional): The Permissions object + // specifies which users you want to have access to transcoded files and the + // type of access you want them to have. You can grant permissions to a maximum + // of 30 users and/or predefined Amazon S3 groups. Grantee Type: Specify the + // type of value that appears in the Grantee object: Canonical: The value in + // the Grantee object is either the canonical user ID for an AWS account or + // an origin access identity for an Amazon CloudFront distribution. For more + // information about canonical user IDs, see Access Control List (ACL) Overview + // in the Amazon Simple Storage Service Developer Guide. For more information // about using CloudFront origin access identities to require that users use // CloudFront URLs instead of Amazon S3 URLs, see Using an Origin Access Identity // to Restrict Access to Your Amazon S3 Content. A canonical user ID is not - // the same as an AWS account number. Email: The value in the Grantee object - // is the registered email address of an AWS account. Group: The value in the + // the same as an AWS account number. Email: The value in the Grantee object + // is the registered email address of an AWS account. Group: The value in the // Grantee object is one of the following predefined Amazon S3 groups: AllUsers, // AuthenticatedUsers, or LogDelivery. Grantee: The AWS user or group that // you want to have access to transcoded files and playlists. To identify the @@ -4313,12 +4311,12 @@ type UpdatePipelineInput struct { // address of an AWS account, or a predefined Amazon S3 group Access: The // permission that you want to give to the AWS user that you specified in Grantee. // Permissions are granted on the files that Elastic Transcoder adds to the - // bucket, including playlists and video files. Valid values include: READ: + // bucket, including playlists and video files. Valid values include: READ: // The grantee can read the objects and metadata for objects that Elastic Transcoder - // adds to the Amazon S3 bucket. 
READ_ACP: The grantee can read the object - // ACL for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: + // adds to the Amazon S3 bucket. READ_ACP: The grantee can read the object ACL + // for objects that Elastic Transcoder adds to the Amazon S3 bucket. WRITE_ACP: // The grantee can write the ACL for the objects that Elastic Transcoder adds - // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and + // to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, READ_ACP, and // WRITE_ACP permissions for the objects that Elastic Transcoder adds to the // Amazon S3 bucket. StorageClass: The Amazon S3 storage class, Standard // or ReducedRedundancy, that you want Elastic Transcoder to assign to the video @@ -4360,8 +4358,8 @@ type UpdatePipelineInput struct { // If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket // object. // - // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save - // thumbnail files. Permissions (Optional): The Permissions object specifies + // Bucket: The Amazon S3 bucket in which you want Elastic Transcoder to save + // thumbnail files. Permissions (Optional): The Permissions object specifies // which users and/or predefined Amazon S3 groups you want to have access to // thumbnail files, and the type of access you want them to have. You can grant // permissions to a maximum of 30 users and/or predefined Amazon S3 groups. @@ -4369,25 +4367,25 @@ type UpdatePipelineInput struct { // Canonical: The value in the Grantee object is either the canonical user // ID for an AWS account or an origin access identity for an Amazon CloudFront // distribution. A canonical user ID is not the same as an AWS account number. - // Email: The value in the Grantee object is the registered email address - // of an AWS account. Group: The value in the Grantee object is one of the - // following predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. + // Email: The value in the Grantee object is the registered email address of + // an AWS account. Group: The value in the Grantee object is one of the following + // predefined Amazon S3 groups: AllUsers, AuthenticatedUsers, or LogDelivery. // Grantee: The AWS user or group that you want to have access to thumbnail // files. To identify the user or group, you can specify the canonical user // ID for an AWS account, an origin access identity for a CloudFront distribution, // the registered email address of an AWS account, or a predefined Amazon S3 // group. Access: The permission that you want to give to the AWS user that // you specified in Grantee. Permissions are granted on the thumbnail files - // that Elastic Transcoder adds to the bucket. Valid values include: READ: + // that Elastic Transcoder adds to the bucket. Valid values include: READ: // The grantee can read the thumbnails and metadata for objects that Elastic - // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read - // the object ACL for thumbnails that Elastic Transcoder adds to the Amazon - // S3 bucket. WRITE_ACP: The grantee can write the ACL for the thumbnails - // that Elastic Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The - // grantee has READ, READ_ACP, and WRITE_ACP permissions for the thumbnails - // that Elastic Transcoder adds to the Amazon S3 bucket. 
StorageClass: The - // Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic - // Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket. + // Transcoder adds to the Amazon S3 bucket. READ_ACP: The grantee can read the + // object ACL for thumbnails that Elastic Transcoder adds to the Amazon S3 bucket. + // WRITE_ACP: The grantee can write the ACL for the thumbnails that Elastic + // Transcoder adds to the Amazon S3 bucket. FULL_CONTROL: The grantee has READ, + // READ_ACP, and WRITE_ACP permissions for the thumbnails that Elastic Transcoder + // adds to the Amazon S3 bucket. StorageClass: The Amazon S3 storage class, + // Standard or ReducedRedundancy, that you want Elastic Transcoder to assign + // to the thumbnails that it stores in your Amazon S3 bucket. ThumbnailConfig *PipelineOutputConfig `type:"structure"` } @@ -4439,15 +4437,15 @@ type UpdatePipelineNotificationsInput struct { // that you want to notify to report job status. // // To receive notifications, you must also subscribe to the new topic in the - // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification + // Amazon SNS console. Progressing: The topic ARN for the Amazon Simple Notification // Service (Amazon SNS) topic that you want to notify when Elastic Transcoder // has started to process jobs that are added to this pipeline. This is the - // ARN that Amazon SNS returned when you created the topic. Completed: The - // topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder + // ARN that Amazon SNS returned when you created the topic. Completed: The topic + // ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder // has finished processing a job. This is the ARN that Amazon SNS returned when - // you created the topic. Warning: The topic ARN for the Amazon SNS topic that + // you created the topic. Warning: The topic ARN for the Amazon SNS topic that // you want to notify when Elastic Transcoder encounters a warning condition. - // This is the ARN that Amazon SNS returned when you created the topic. Error: + // This is the ARN that Amazon SNS returned when you created the topic. Error: // The topic ARN for the Amazon SNS topic that you want to notify when Elastic // Transcoder encounters an error condition. This is the ARN that Amazon SNS // returned when you created the topic. @@ -4632,9 +4630,9 @@ type VideoParameters struct { // The H.264 profile that you want to use for the output file. Elastic Transcoder // supports the following profiles: // - // baseline: The profile most commonly used for videoconferencing and for - // mobile applications. main: The profile used for standard-definition digital - // TV broadcasts. high: The profile used for high-definition digital TV broadcasts + // baseline: The profile most commonly used for videoconferencing and for + // mobile applications. main: The profile used for standard-definition digital + // TV broadcasts. high: The profile used for high-definition digital TV broadcasts // and for Blu-ray discs. Level (H.264 Only) // // The H.264 level that you want to use for the output file. 
Elastic Transcoder @@ -4704,11 +4702,11 @@ type VideoParameters struct { // is null, and you are using one of the resolution changes from the list below, // Elastic Transcoder applies the following color space conversions: // - // Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 - // Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 - // HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 - // HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 - // Elastic Transcoder may change the behavior of the ColorspaceConversionMode + // Standard to HD, 720x480 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // Standard to HD, 720x576 to 1920x1080 - Elastic Transcoder applies Bt601ToBt709 + // HD to Standard, 1920x1080 to 720x480 - Elastic Transcoder applies Bt709ToBt601 + // HD to Standard, 1920x1080 to 720x576 - Elastic Transcoder applies Bt709ToBt601 + // Elastic Transcoder may change the behavior of the ColorspaceConversionMode // Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode. // If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does // not change the color space of a file. If you are unsure what ColorSpaceConversionMode @@ -4739,9 +4737,9 @@ type VideoParameters struct { // // Whether to use a fixed value for FixedGOP. Valid values are true and false: // - // true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance + // true: Elastic Transcoder uses the value of KeyframesMaxDist for the distance // between key frames (the number of frames in a group of pictures, or GOP). - // false: The distance between key frames can vary. FixedGOP must be set to + // false: The distance between key frames can vary. FixedGOP must be set to // true for fmp4 containers. FixedGOP *string `type:"string"` @@ -4759,8 +4757,8 @@ type VideoParameters struct { // // where: // - // width in pixels and height in pixels represent the Resolution of the output - // video. maximum recommended decoding speed in Luma samples/second is less + // width in pixels and height in pixels represent the Resolution of the output + // video. maximum recommended decoding speed in Luma samples/second is less // than or equal to the maximum value listed in the following table, based on // the value that you specified for Level. The maximum recommended decoding // speed in Luma samples/second for each level is described in the following @@ -4825,9 +4823,9 @@ type VideoParameters struct { // The width and height of the video in the output file, in pixels. Valid // values are auto and width x height: // - // auto: Elastic Transcoder attempts to preserve the width and height of - // the input file, subject to the following rules. width x height: The width - // and height of the output video in pixels. Note the following about specifying + // auto: Elastic Transcoder attempts to preserve the width and height of the + // input file, subject to the following rules. width x height: The width and + // height of the output video in pixels. Note the following about specifying // the width and height: // // The width must be an even integer between 128 and 4096, inclusive. 
The @@ -4845,26 +4843,25 @@ type VideoParameters struct { // Specify one of the following values to control scaling of the output video: // - // Fit: Elastic Transcoder scales the output video so it matches the value + // Fit: Elastic Transcoder scales the output video so it matches the value // that you specified in either MaxWidth or MaxHeight without exceeding the - // other value. Fill: Elastic Transcoder scales the output video so it matches + // other value. Fill: Elastic Transcoder scales the output video so it matches // the value that you specified in either MaxWidth or MaxHeight and matches // or exceeds the other value. Elastic Transcoder centers the output video and - // then crops it in the dimension (if any) that exceeds the maximum value. - // Stretch: Elastic Transcoder stretches the output video to match the values - // that you specified for MaxWidth and MaxHeight. If the relative proportions - // of the input video and the output video are different, the output video will - // be distorted. Keep: Elastic Transcoder does not scale the output video. - // If either dimension of the input video exceeds the values that you specified - // for MaxWidth and MaxHeight, Elastic Transcoder crops the output video. ShrinkToFit: - // Elastic Transcoder scales the output video down so that its dimensions match - // the values that you specified for at least one of MaxWidth and MaxHeight - // without exceeding either value. If you specify this option, Elastic Transcoder - // does not scale the video up. ShrinkToFill: Elastic Transcoder scales the - // output video down so that its dimensions match the values that you specified - // for at least one of MaxWidth and MaxHeight without dropping below either - // value. If you specify this option, Elastic Transcoder does not scale the - // video up. + // then crops it in the dimension (if any) that exceeds the maximum value. Stretch: + // Elastic Transcoder stretches the output video to match the values that you + // specified for MaxWidth and MaxHeight. If the relative proportions of the + // input video and the output video are different, the output video will be + // distorted. Keep: Elastic Transcoder does not scale the output video. If either + // dimension of the input video exceeds the values that you specified for MaxWidth + // and MaxHeight, Elastic Transcoder crops the output video. ShrinkToFit: Elastic + // Transcoder scales the output video down so that its dimensions match the + // values that you specified for at least one of MaxWidth and MaxHeight without + // exceeding either value. If you specify this option, Elastic Transcoder does + // not scale the video up. ShrinkToFill: Elastic Transcoder scales the output + // video down so that its dimensions match the values that you specified for + // at least one of MaxWidth and MaxHeight without dropping below either value. + // If you specify this option, Elastic Transcoder does not scale the video up. 
SizingPolicy *string `type:"string"` // Settings for the size, location, and opacity of graphics that you want Elastic diff --git a/vendor/vendor.json b/vendor/vendor.json index 3a4dd9e86..d50287eaa 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -277,458 +277,590 @@ "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" }, { - "checksumSHA1": "zrKMMpGfvfCUU07ydetOaOKum5U=", + "checksumSHA1": "4AOg5/w5X4YYFGRV+V0pLKAhe8c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "AWg3FBA1NTPdIVZipaQf/rGx38o=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "EiauD48zRlXIFvAENgZ+PXSEnT0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": 
"3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "svFeyM3oQkk0nfQ0pguDjMgV2M4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "46SVikiXo5xuy/CS6mM1XVTUU7w=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "0HzXzMByDLiJSqrMEqbg5URAx0o=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "G1he3uSmd1h8ZRnKOIWuDrWp2zQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "gHqZ41fSrCEUftkImHKGW+cKxFk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": 
"2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "MPzz1x/qt6f2R/JW6aELbm/qT4k=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "nHHyS4+VgZOV7F3Xu87crArmbds=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "ayzKZc+f+OrjOtE2bz4+lrlKR7c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "ttxyyPnlmMDqX+sY10BwbwwA+jo=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "LsCIsjbzX2r3n/AhpNJvAC5ueNA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/signer/v4", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011" + "revision": "2cc71659118a868dc7544a7ef0808eb42d487011", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": 
"3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "lD48Br3S98XvKfKID0QiTbBgC1M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { - "checksumSHA1": "HMNQSV7Om3yvNiougcTrfZVJFbE=", + "checksumSHA1": "vp/AYdsQnZtoPqtX86VsgmLIx1w=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "4deSd9La3EF2Cmq+tD5rcvhfTGQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "eCFTaV9GKqv/UEzwRgFFUaFz098=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "b9W5mR0lazSwYV6Pl8HNslokIpo=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "mWNJKpt18ASs9/RhnIjILcsGlng=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "Q6xeArbCzOunYsn2tFyTA5LN1Cg=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "p5a/DcdUvhTx0PCRR+/CRXk9g6c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": 
"2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "p9BTPHO+J8OdzK2btdcGGAaTmhk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "t1fZO+x4OG6e7T8HIi2Yr2wR9D4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "y+pZPK8hcTDwq1zHuRduWE14flw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "gqlYKqMKCuQ3fzNTyDw6jiG1sCs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "IEHq+VLH1fud1oQ4MXj1nqfpgUY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "sHPoLMWXO5tM63ipuxVXduuRypI=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "1vOgFGxLhjNe6BK3RJaV1OqisCs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "rjSScNzMTvEHv7Lk5KcxDpNU5EE=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "RZF1yHtJhAqaMwbeAM/6BdLLavk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + 
"versionExact": "v1.2.7" }, { "checksumSHA1": "TAuizMIsvgeuZhmGTYPA7LOXHvY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { - "checksumSHA1": "B/g+Usd8rImjgUpVPLyNTL0LaUQ=", + "checksumSHA1": "qHuJHGUAuuizD9834MP3gVupfdo=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "1c9xsISLQWKSrORIpdokCCWCe2M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "bvVmHWxCOk0Cmw333zQ5jutPCZQ=", "comment": "v1.1.15", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "TtIAgZ+evpkKB5bBYCB69k0wZoU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "B1EtgBrv//gYqA+Sp6a/SK2zLO4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "kXJ9ycLAIj0PFSFbfrA/LR/hIi8=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "2n5/m0ClE4OyQRNdjfLwg+nSY3o=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "/cFX1/Gr6M+r9232gLIV+4np7Po=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "jM0EhAIybh0fyLHxrmVSmG3JLmU=", "comment": "v1.1.23", "path": 
"github.com/aws/aws-sdk-go/service/lambda", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "aLwDFgrPzIBidURxso1ujcr2pDs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "w0aQAtZ42oGeVOqwwG15OBGoU1s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "mgImZ/bluUOY9GpQ/oAnscIXwrA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "6ejP+X+O9e6y40GICj9Vcn1MuBY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "68YN+UopWOSISIcQQ6zSVbyaDzQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "X9g/Vdq939ijN2gcumwOyYfHM2U=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "+ic7vevBfganFLENR29pJaEf4Tw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "oLAlquYlQzgYFS9ochS/iQ9+uXY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": 
"2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "checksumSHA1": "6a2WM0r/rXUxFjxH73jYL88LBSw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "3c37d29820480639ff03fd66df00a0f27984f88d", - "revisionTime": "2016-07-13T21:13:24Z" + "revision": "565027b24171359f23f883d0fc48c228cdde301d", + "revisionTime": "2016-07-21T22:15:38Z", + "version": "v1.2.7", + "versionExact": "v1.2.7" }, { "path": "github.com/bgentry/speakeasy", From 17b16f543e359e57ed19f10c872af45c7c170950 Mon Sep 17 00:00:00 2001 From: Brian Menges Date: Mon, 25 Jul 2016 04:32:24 -0700 Subject: [PATCH 0390/1238] Ignore IOPS on non io1 AWS devices (#7783) - Already ignoring IOPS on ebs attached non-io1 devices; extended to root_block_device - Added warning captured from #4146 / [../blob/master/builtin/providers/aws/resource_aws_ebs_volume.go#L104](resource_aws_ebs_volume.go#L104) - Added test when setting IOPS to 330 (11GiB * 30 = 330) on GP2 root device results in AWS reported 100 IOPS (successfully ignored input) --- .../providers/aws/resource_aws_instance.go | 10 ++- .../aws/resource_aws_instance_test.go | 68 +++++++++++++++++++ 2 files changed, 77 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go index 7681d6ed2..892235965 100644 --- a/builtin/providers/aws/resource_aws_instance.go +++ b/builtin/providers/aws/resource_aws_instance.go @@ -931,8 +931,16 @@ func readBlockDeviceMappingsFromConfig( ebs.VolumeType = aws.String(v) } - if v, ok := bd["iops"].(int); ok && v > 0 { + if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType == "io1" { + // Only set the iops attribute if the volume type is io1. Setting otherwise + // can trigger a refresh/plan loop based on the computed value that is given + // from AWS, and prevent us from specifying 0 as a valid iops. + // See https://github.com/hashicorp/terraform/pull/4146 + // See https://github.com/hashicorp/terraform/issues/7765 ebs.Iops = aws.Int64(int64(v)) + } else if v, ok := bd["iops"].(int); ok && v > 0 && *ebs.VolumeType != "io1" { + // Message user about incompatibility + log.Printf("[WARN] IOPs is only valid for storate type io1 for EBS Volumes") } if dn, err := fetchRootDeviceName(d.Get("ami").(string), conn); err == nil { diff --git a/builtin/providers/aws/resource_aws_instance_test.go b/builtin/providers/aws/resource_aws_instance_test.go index 0efcd5fe8..0abc4d6b3 100644 --- a/builtin/providers/aws/resource_aws_instance_test.go +++ b/builtin/providers/aws/resource_aws_instance_test.go @@ -105,6 +105,56 @@ func TestAccAWSInstance_basic(t *testing.T) { }) } +func TestAccAWSInstance_GP2IopsDevice(t *testing.T) { + var v ec2.Instance + + testCheck := func() resource.TestCheckFunc { + return func(*terraform.State) error { + + // Map out the block devices by name, which should be unique. + blockDevices := make(map[string]*ec2.InstanceBlockDeviceMapping) + for _, blockDevice := range v.BlockDeviceMappings { + blockDevices[*blockDevice.DeviceName] = blockDevice + } + + // Check if the root block device exists. 
+ if _, ok := blockDevices["/dev/sda1"]; !ok { + return fmt.Errorf("block device doesn't exist: /dev/sda1") + } + + return nil + } + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_instance.foo", + IDRefreshIgnore: []string{ + "ephemeral_block_device", "user_data", "security_groups", "vpc_security_groups"}, + Providers: testAccProviders, + CheckDestroy: testAccCheckInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccInstanceGP2IopsDevice, + //Config: testAccInstanceConfigBlockDevices, + Check: resource.ComposeTestCheckFunc( + testAccCheckInstanceExists( + "aws_instance.foo", &v), + resource.TestCheckResourceAttr( + "aws_instance.foo", "root_block_device.#", "1"), + resource.TestCheckResourceAttr( + "aws_instance.foo", "root_block_device.0.volume_size", "11"), + resource.TestCheckResourceAttr( + "aws_instance.foo", "root_block_device.0.volume_type", "gp2"), + resource.TestCheckResourceAttr( + "aws_instance.foo", "root_block_device.0.iops", "100"), + testCheck(), + ), + }, + }, + }) +} + func TestAccAWSInstance_blockDevices(t *testing.T) { var v ec2.Instance @@ -738,6 +788,24 @@ resource "aws_instance" "foo" { } ` +const testAccInstanceGP2IopsDevice = ` +resource "aws_instance" "foo" { + # us-west-2 + ami = "ami-55a7ea65" + + # In order to attach an encrypted volume to an instance you need to have an + # m3.medium or larger. See "Supported Instance Types" in: + # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html + instance_type = "m3.medium" + + root_block_device { + volume_type = "gp2" + volume_size = 11 + iops = 330 + } +} +` + const testAccInstanceConfigBlockDevices = ` resource "aws_instance" "foo" { # us-west-2 From f8575f1edd3a7c6238fc222334a3c80999ca76a6 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 12:34:49 +0100 Subject: [PATCH 0391/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 85691cc75..8dd699a49 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -219,6 +219,7 @@ BUG FIXES: * provider/aws: Bump rds_cluster timeout to 15 mins [GH-7604] * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured [GH-7669] * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` [GH-7698] + * provider/aws: Ignore IOPS on non io1 AWS root_block_device [GH-7783] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 9f314a3c29944885a9d61e54eb62e363d9a64c23 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Mon, 25 Jul 2016 13:49:09 +0200 Subject: [PATCH 0392/1238] provider/scaleway: Expose IPv6 support, improve documentation (#7784) * provider/scaleway: update api version * provider/scaleway: expose ipv6 support, rename ip attributes since it can be both ipv4 and ipv6, choose a more generic name. 
* provider/scaleway: allow servers in different SGs * provider/scaleway: update documentation * provider/scaleway: Update docs with security group * provider/scaleway: add testcase for server security groups * provider/scaleway: make deleting of security rules more resilient * provider/scaleway: make deletion of security group more resilient * provider/scaleway: guard against missing server --- .../scaleway/resource_scaleway_ip.go | 4 +- .../scaleway/resource_scaleway_ip_test.go | 6 +- .../resource_scaleway_security_group.go | 11 +- .../resource_scaleway_security_group_rule.go | 9 + .../scaleway/resource_scaleway_server.go | 44 +++- .../scaleway/resource_scaleway_server_test.go | 86 +++++++ .../scaleway/scaleway-cli/pkg/api/api.go | 132 +++++++---- .../scaleway/scaleway-cli/pkg/api/cache.go | 212 ++++++++++++------ .../scaleway/scaleway-cli/pkg/api/logger.go | 30 ++- .../providers/scaleway/r/server.html.markdown | 9 +- 10 files changed, 417 insertions(+), 126 deletions(-) diff --git a/builtin/providers/scaleway/resource_scaleway_ip.go b/builtin/providers/scaleway/resource_scaleway_ip.go index b4fe7003f..100930a33 100644 --- a/builtin/providers/scaleway/resource_scaleway_ip.go +++ b/builtin/providers/scaleway/resource_scaleway_ip.go @@ -54,7 +54,9 @@ func resourceScalewayIPRead(d *schema.ResourceData, m interface{}) error { } d.Set("ip", resp.IP.Address) - d.Set("server", resp.IP.Server.Identifier) + if resp.IP.Server != nil { + d.Set("server", resp.IP.Server.Identifier) + } return nil } diff --git a/builtin/providers/scaleway/resource_scaleway_ip_test.go b/builtin/providers/scaleway/resource_scaleway_ip_test.go index 464817eff..f32cae1f5 100644 --- a/builtin/providers/scaleway/resource_scaleway_ip_test.go +++ b/builtin/providers/scaleway/resource_scaleway_ip_test.go @@ -112,7 +112,11 @@ func testAccCheckScalewayIPAttachment(n string, check func(string) bool, msg str return err } - if !check(ip.IP.Server.Identifier) { + var serverID = "" + if ip.IP.Server != nil { + serverID = ip.IP.Server.Identifier + } + if !check(serverID) { return fmt.Errorf("IP check failed: %q", msg) } diff --git a/builtin/providers/scaleway/resource_scaleway_security_group.go b/builtin/providers/scaleway/resource_scaleway_security_group.go index 6c5da13d6..a3421dee8 100644 --- a/builtin/providers/scaleway/resource_scaleway_security_group.go +++ b/builtin/providers/scaleway/resource_scaleway_security_group.go @@ -90,7 +90,7 @@ func resourceScalewaySecurityGroupRead(d *schema.ResourceData, m interface{}) er func resourceScalewaySecurityGroupUpdate(d *schema.ResourceData, m interface{}) error { scaleway := m.(*Client).scaleway - var req = api.ScalewayNewSecurityGroup{ + var req = api.ScalewayUpdateSecurityGroup{ Organization: scaleway.Organization, Name: d.Get("name").(string), Description: d.Get("description").(string), @@ -110,6 +110,15 @@ func resourceScalewaySecurityGroupDelete(d *schema.ResourceData, m interface{}) err := scaleway.DeleteSecurityGroup(d.Id()) if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err } diff --git a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go index 85c2f3575..d79070bfe 100644 --- a/builtin/providers/scaleway/resource_scaleway_security_group_rule.go +++ b/builtin/providers/scaleway/resource_scaleway_security_group_rule.go @@ 
-154,6 +154,15 @@ func resourceScalewaySecurityGroupRuleDelete(d *schema.ResourceData, m interface err := scaleway.DeleteSecurityGroupRule(d.Get("security_group").(string), d.Id()) if err != nil { + if serr, ok := err.(api.ScalewayAPIError); ok { + log.Printf("[DEBUG] error reading Security Group Rule: %q\n", serr.APIMessage) + + if serr.StatusCode == 404 { + d.SetId("") + return nil + } + } + return err } diff --git a/builtin/providers/scaleway/resource_scaleway_server.go b/builtin/providers/scaleway/resource_scaleway_server.go index 659c5c29f..0266fcc9d 100644 --- a/builtin/providers/scaleway/resource_scaleway_server.go +++ b/builtin/providers/scaleway/resource_scaleway_server.go @@ -40,11 +40,24 @@ func resourceScalewayServer() *schema.Resource { }, Optional: true, }, - "ipv4_address_private": &schema.Schema{ + "enable_ipv6": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "dynamic_ip_required": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "security_group": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "private_ip": &schema.Schema{ Type: schema.TypeString, Computed: true, }, - "ipv4_address_public": &schema.Schema{ + "public_ip": &schema.Schema{ Type: schema.TypeString, Computed: true, }, @@ -53,10 +66,6 @@ func resourceScalewayServer() *schema.Resource { Optional: true, Computed: true, }, - "dynamic_ip_required": &schema.Schema{ - Type: schema.TypeBool, - Optional: true, - }, "state_detail": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -70,9 +79,11 @@ func resourceScalewayServerCreate(d *schema.ResourceData, m interface{}) error { image := d.Get("image").(string) var server = api.ScalewayServerDefinition{ - Name: d.Get("name").(string), - Image: String(image), - Organization: scaleway.Organization, + Name: d.Get("name").(string), + Image: String(image), + Organization: scaleway.Organization, + EnableIPV6: d.Get("enable_ipv6").(bool), + SecurityGroup: d.Get("security_group").(string), } server.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) @@ -127,8 +138,9 @@ func resourceScalewayServerRead(d *schema.ResourceData, m interface{}) error { return err } - d.Set("ipv4_address_private", server.PrivateIP) - d.Set("ipv4_address_public", server.PublicAddress.IP) + d.Set("private_ip", server.PrivateIP) + d.Set("public_ip", server.PublicAddress.IP) + d.Set("state", server.State) d.Set("state_detail", server.StateDetail) d.Set("tags", server.Tags) @@ -161,10 +173,20 @@ func resourceScalewayServerUpdate(d *schema.ResourceData, m interface{}) error { } } + if d.HasChange("enable_ipv6") { + req.EnableIPV6 = Bool(d.Get("enable_ipv6").(bool)) + } + if d.HasChange("dynamic_ip_required") { req.DynamicIPRequired = Bool(d.Get("dynamic_ip_required").(bool)) } + if d.HasChange("security_group") { + req.SecurityGroup = &api.ScalewaySecurityGroup{ + Identifier: d.Get("security_group").(string), + } + } + if err := scaleway.PatchServer(d.Id(), req); err != nil { return fmt.Errorf("Failed patching scaleway server: %q", err) } diff --git a/builtin/providers/scaleway/resource_scaleway_server_test.go b/builtin/providers/scaleway/resource_scaleway_server_test.go index 27fdab537..a9ef42727 100644 --- a/builtin/providers/scaleway/resource_scaleway_server_test.go +++ b/builtin/providers/scaleway/resource_scaleway_server_test.go @@ -31,6 +31,30 @@ func TestAccScalewayServer_Basic(t *testing.T) { }) } +func TestAccScalewayServer_SecurityGroup(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() 
{ testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckScalewayServerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckScalewayServerConfig_SecurityGroup, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayServerExists("scaleway_server.base"), + testAccCheckScalewayServerSecurityGroup("scaleway_server.base", "blue"), + ), + }, + resource.TestStep{ + Config: testAccCheckScalewayServerConfig_SecurityGroup_Update, + Check: resource.ComposeTestCheckFunc( + testAccCheckScalewayServerExists("scaleway_server.base"), + testAccCheckScalewayServerSecurityGroup("scaleway_server.base", "red"), + ), + }, + }, + }) +} + func testAccCheckScalewayServerDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*Client).scaleway @@ -77,6 +101,28 @@ func testAccCheckScalewayServerAttributes(n string) resource.TestCheckFunc { } } +func testAccCheckScalewayServerSecurityGroup(n, securityGroupName string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Unknown resource: %s", n) + } + + client := testAccProvider.Meta().(*Client).scaleway + server, err := client.GetServer(rs.Primary.ID) + + if err != nil { + return err + } + + if server.SecurityGroup.Name != securityGroupName { + return fmt.Errorf("Server has wrong security_group") + } + + return nil + } +} + func testAccCheckScalewayServerExists(n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -114,3 +160,43 @@ resource "scaleway_server" "base" { type = "C1" tags = [ "terraform-test" ] }`, armImageIdentifier) + +var testAccCheckScalewayServerConfig_SecurityGroup = fmt.Sprintf(` +resource "scaleway_security_group" "blue" { + name = "blue" + description = "blue" +} + +resource "scaleway_security_group" "red" { + name = "red" + description = "red" +} + +resource "scaleway_server" "base" { + name = "test" + # ubuntu 14.04 + image = "%s" + type = "C1" + tags = [ "terraform-test" ] + security_group = "${scaleway_security_group.blue.id}" +}`, armImageIdentifier) + +var testAccCheckScalewayServerConfig_SecurityGroup_Update = fmt.Sprintf(` +resource "scaleway_security_group" "blue" { + name = "blue" + description = "blue" +} + +resource "scaleway_security_group" "red" { + name = "red" + description = "red" +} + +resource "scaleway_server" "base" { + name = "test" + # ubuntu 14.04 + image = "%s" + type = "C1" + tags = [ "terraform-test" ] + security_group = "${scaleway_security_group.red.id}" +}`, armImageIdentifier) diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go index cd3a81e28..976d28564 100644 --- a/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/api.go @@ -98,7 +98,7 @@ func (e ScalewayAPIError) Error() string { "Message": e.Message, "APIMessage": e.APIMessage, } { - fmt.Fprintf(&b, " %-30s %s", fmt.Sprintf("%s: ", k), v) + fmt.Fprintf(&b, "%s: %v ", k, v) } return b.String() } @@ -419,13 +419,13 @@ type ScalewayGetSecurityGroup struct { // ScalewayIPDefinition represents the IP's fields type ScalewayIPDefinition struct { - Organization string `json:"organization"` - Reverse string `json:"reverse"` - ID string `json:"id"` - Server struct { + Organization string `json:"organization"` + Reverse *string `json:"reverse"` + ID string `json:"id"` + Server *struct { Identifier string 
`json:"id,omitempty"` Name string `json:"name,omitempty"` - } `json:"server,omitempty"` + } `json:"server"` Address string `json:"address"` } @@ -448,11 +448,20 @@ type ScalewaySecurityGroup struct { Name string `json:"name,omitempty"` } -// ScalewayNewSecurityGroup definition POST/PUT request /security_groups +// ScalewayNewSecurityGroup definition POST request /security_groups type ScalewayNewSecurityGroup struct { - Organization string `json:"organization"` - Name string `json:"name"` - Description string `json:"description"` + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` + OrganizationDefault bool `json:"organization_default"` +} + +// ScalewayUpdateSecurityGroup definition PUT request /security_groups +type ScalewayUpdateSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` + OrganizationDefault bool `json:"organization_default"` } // ScalewayServer represents a Scaleway server @@ -584,6 +593,8 @@ type ScalewayServerDefinition struct { PublicIP string `json:"public_ip,omitempty"` EnableIPV6 bool `json:"enable_ipv6,omitempty"` + + SecurityGroup string `json:"security_group,omitempty"` } // ScalewayOneServer represents the response of a GET /servers/UUID API call @@ -832,27 +843,26 @@ type MarketImages struct { // NewScalewayAPI creates a ready-to-use ScalewayAPI client func NewScalewayAPI(organization, token, userAgent string, options ...func(*ScalewayAPI)) (*ScalewayAPI, error) { - cache, err := NewScalewayCache() - if err != nil { - return nil, err - } s := &ScalewayAPI{ // exposed Organization: organization, Token: token, - Cache: cache, Logger: NewDefaultLogger(), - verbose: os.Getenv("SCW_VERBOSE_API") != "", - password: "", - userAgent: userAgent, // internal - client: &http.Client{}, + client: &http.Client{}, + verbose: os.Getenv("SCW_VERBOSE_API") != "", + password: "", + userAgent: userAgent, } for _, option := range options { option(s) } - + cache, err := NewScalewayCache(func() { s.Logger.Debugf("Writing cache file to disk") }) + if err != nil { + return nil, err + } + s.Cache = cache if os.Getenv("SCW_TLSVERIFY") == "0" { s.client.Transport = &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, @@ -1273,62 +1283,77 @@ func (s *ScalewayAPI) PutVolume(volumeID string, definition ScalewayVolumePutDef // ResolveServer attempts to find a matching Identifier for the input string func (s *ScalewayAPI) ResolveServer(needle string) (ScalewayResolverResults, error) { - servers := s.Cache.LookUpServers(needle, true) + servers, err := s.Cache.LookUpServers(needle, true) + if err != nil { + return servers, err + } if len(servers) == 0 { - if _, err := s.GetServers(true, 0); err != nil { + if _, err = s.GetServers(true, 0); err != nil { return nil, err } - servers = s.Cache.LookUpServers(needle, true) + servers, err = s.Cache.LookUpServers(needle, true) } - return servers, nil + return servers, err } // ResolveVolume attempts to find a matching Identifier for the input string func (s *ScalewayAPI) ResolveVolume(needle string) (ScalewayResolverResults, error) { - volumes := s.Cache.LookUpVolumes(needle, true) + volumes, err := s.Cache.LookUpVolumes(needle, true) + if err != nil { + return volumes, err + } if len(volumes) == 0 { - if _, err := s.GetVolumes(); err != nil { + if _, err = s.GetVolumes(); err != nil { return nil, err } - volumes = s.Cache.LookUpVolumes(needle, true) + volumes, err = 
s.Cache.LookUpVolumes(needle, true) } - return volumes, nil + return volumes, err } // ResolveSnapshot attempts to find a matching Identifier for the input string func (s *ScalewayAPI) ResolveSnapshot(needle string) (ScalewayResolverResults, error) { - snapshots := s.Cache.LookUpSnapshots(needle, true) + snapshots, err := s.Cache.LookUpSnapshots(needle, true) + if err != nil { + return snapshots, err + } if len(snapshots) == 0 { - if _, err := s.GetSnapshots(); err != nil { + if _, err = s.GetSnapshots(); err != nil { return nil, err } - snapshots = s.Cache.LookUpSnapshots(needle, true) + snapshots, err = s.Cache.LookUpSnapshots(needle, true) } - return snapshots, nil + return snapshots, err } // ResolveImage attempts to find a matching Identifier for the input string func (s *ScalewayAPI) ResolveImage(needle string) (ScalewayResolverResults, error) { - images := s.Cache.LookUpImages(needle, true) + images, err := s.Cache.LookUpImages(needle, true) + if err != nil { + return images, err + } if len(images) == 0 { - if _, err := s.GetImages(); err != nil { + if _, err = s.GetImages(); err != nil { return nil, err } - images = s.Cache.LookUpImages(needle, true) + images, err = s.Cache.LookUpImages(needle, true) } - return images, nil + return images, err } // ResolveBootscript attempts to find a matching Identifier for the input string func (s *ScalewayAPI) ResolveBootscript(needle string) (ScalewayResolverResults, error) { - bootscripts := s.Cache.LookUpBootscripts(needle, true) + bootscripts, err := s.Cache.LookUpBootscripts(needle, true) + if err != nil { + return bootscripts, err + } if len(bootscripts) == 0 { - if _, err := s.GetBootscripts(); err != nil { + if _, err = s.GetBootscripts(); err != nil { return nil, err } - bootscripts = s.Cache.LookUpBootscripts(needle, true) + bootscripts, err = s.Cache.LookUpBootscripts(needle, true) } - return bootscripts, nil + return bootscripts, err } // GetImages gets the list of images from the ScalewayAPI @@ -2154,7 +2179,7 @@ func (s *ScalewayAPI) DeleteSecurityGroup(securityGroupID string) error { } // PutSecurityGroup updates a SecurityGroup -func (s *ScalewayAPI) PutSecurityGroup(group ScalewayNewSecurityGroup, securityGroupID string) error { +func (s *ScalewayAPI) PutSecurityGroup(group ScalewayUpdateSecurityGroup, securityGroupID string) error { resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) if resp != nil { defer resp.Body.Close() @@ -2313,6 +2338,24 @@ func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { return err } +// DetachIP detaches an IP from a server +func (s *ScalewayAPI) DetachIP(ipID string) error { + ip, err := s.GetIP(ipID) + if err != nil { + return err + } + ip.IP.Server = nil + resp, err := s.PutResponse(ComputeAPI, fmt.Sprintf("ips/%s", ipID), ip.IP) + if resp != nil { + defer resp.Body.Close() + } + if err != nil { + return err + } + _, err = s.handleHTTPError([]int{200}, resp) + return err +} + // DeleteIP deletes an IP func (s *ScalewayAPI) DeleteIP(ipID string) error { resp, err := s.DeleteResponse(ComputeAPI, fmt.Sprintf("ips/%s", ipID)) @@ -2322,11 +2365,8 @@ func (s *ScalewayAPI) DeleteIP(ipID string) error { if err != nil { return err } - - if _, err := s.handleHTTPError([]int{204}, resp); err != nil { - return err - } - return nil + _, err = s.handleHTTPError([]int{204}, resp) + return err } // GetIP returns a ScalewayGetIP diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go 
index 72d119059..529bdaaf2 100644 --- a/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/cache.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" "os" "path/filepath" "regexp" @@ -60,6 +59,8 @@ type ScalewayCache struct { // Lock allows ScalewayCache to be used concurrently Lock sync.Mutex `json:"-"` + + hookSave func() } const ( @@ -92,16 +93,16 @@ type ScalewayResolverResult struct { type ScalewayResolverResults []ScalewayResolverResult // NewScalewayResolverResult returns a new ScalewayResolverResult -func NewScalewayResolverResult(Identifier, Name, Arch string, Type int) ScalewayResolverResult { +func NewScalewayResolverResult(Identifier, Name, Arch string, Type int) (ScalewayResolverResult, error) { if err := anonuuid.IsUUID(Identifier); err != nil { - log.Fatal(err) + return ScalewayResolverResult{}, err } return ScalewayResolverResult{ Identifier: Identifier, Type: Type, Name: Name, Arch: Arch, - } + }, nil } func (s ScalewayResolverResults) Len() int { @@ -160,7 +161,10 @@ REDO: } // NewScalewayCache loads a per-user cache -func NewScalewayCache() (*ScalewayCache, error) { +func NewScalewayCache(hookSave func()) (*ScalewayCache, error) { + var cache ScalewayCache + + cache.hookSave = hookSave homeDir := os.Getenv("HOME") // *nix if homeDir == "" { // Windows homeDir = os.Getenv("USERPROFILE") @@ -169,7 +173,6 @@ func NewScalewayCache() (*ScalewayCache, error) { homeDir = "/tmp" } cachePath := filepath.Join(homeDir, ".scw-cache.db") - var cache ScalewayCache cache.Path = cachePath _, err := os.Stat(cachePath) if os.IsNotExist(err) { @@ -210,13 +213,13 @@ func NewScalewayCache() (*ScalewayCache, error) { } // Clear removes all information from the cache -func (s *ScalewayCache) Clear() { - s.Images = make(map[string][CacheMaxfield]string) - s.Snapshots = make(map[string][CacheMaxfield]string) - s.Volumes = make(map[string][CacheMaxfield]string) - s.Bootscripts = make(map[string][CacheMaxfield]string) - s.Servers = make(map[string][CacheMaxfield]string) - s.Modified = true +func (c *ScalewayCache) Clear() { + c.Images = make(map[string][CacheMaxfield]string) + c.Snapshots = make(map[string][CacheMaxfield]string) + c.Volumes = make(map[string][CacheMaxfield]string) + c.Bootscripts = make(map[string][CacheMaxfield]string) + c.Servers = make(map[string][CacheMaxfield]string) + c.Modified = true } // Flush flushes the cache database @@ -229,8 +232,7 @@ func (c *ScalewayCache) Save() error { c.Lock.Lock() defer c.Lock.Unlock() - log.Printf("Writing cache file to disk") - + c.hookSave() if c.Modified { file, err := ioutil.TempFile(filepath.Dir(c.Path), filepath.Base(c.Path)) if err != nil { @@ -259,15 +261,19 @@ func (s *ScalewayResolverResult) ComputeRankMatch(needle string) { } // LookUpImages attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) ScalewayResolverResults { +func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) (ScalewayResolverResults, error) { c.Lock.Lock() defer c.Lock.Unlock() var res ScalewayResolverResults + var exactMatches ScalewayResolverResults if acceptUUID && anonuuid.IsUUID(needle) == nil { if fields, ok := c.Images[needle]; ok { - entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } 
entry.ComputeRankMatch(needle) res = append(res, entry) } @@ -276,41 +282,53 @@ func (c *ScalewayCache) LookUpImages(needle string, acceptUUID bool) ScalewayRes needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") // FIXME: if 'user/' is in needle, only watch for a user image nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) - var exactMatches ScalewayResolverResults for identifier, fields := range c.Images { if fields[CacheTitle] == needle { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) exactMatches = append(exactMatches, entry) } if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } else if strings.HasPrefix(fields[CacheMarketPlaceUUID], needle) || nameRegex.MatchString(fields[CacheMarketPlaceUUID]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } if len(exactMatches) == 1 { - return exactMatches + return exactMatches, nil } - return removeDuplicatesResults(res) + return removeDuplicatesResults(res), nil } // LookUpSnapshots attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) ScalewayResolverResults { +func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) (ScalewayResolverResults, error) { c.Lock.Lock() defer c.Lock.Unlock() var res ScalewayResolverResults + var exactMatches ScalewayResolverResults if acceptUUID && anonuuid.IsUUID(needle) == nil { if fields, ok := c.Snapshots[needle]; ok { - entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } @@ -318,136 +336,168 @@ func (c *ScalewayCache) LookUpSnapshots(needle string, acceptUUID bool) Scaleway needle = regexp.MustCompile(`^user/`).ReplaceAllString(needle, "") nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) - var exactMatches ScalewayResolverResults for identifier, fields := range c.Snapshots { if fields[CacheTitle] == needle { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) exactMatches = append(exactMatches, entry) } if strings.HasPrefix(identifier, needle) || 
nameRegex.MatchString(fields[CacheTitle]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } if len(exactMatches) == 1 { - return exactMatches + return exactMatches, nil } - return removeDuplicatesResults(res) + return removeDuplicatesResults(res), nil } // LookUpVolumes attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) ScalewayResolverResults { +func (c *ScalewayCache) LookUpVolumes(needle string, acceptUUID bool) (ScalewayResolverResults, error) { c.Lock.Lock() defer c.Lock.Unlock() var res ScalewayResolverResults + var exactMatches ScalewayResolverResults if acceptUUID && anonuuid.IsUUID(needle) == nil { if fields, ok := c.Volumes[needle]; ok { - entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) - var exactMatches ScalewayResolverResults for identifier, fields := range c.Volumes { if fields[CacheTitle] == needle { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) exactMatches = append(exactMatches, entry) } if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } if len(exactMatches) == 1 { - return exactMatches + return exactMatches, nil } - return removeDuplicatesResults(res) + return removeDuplicatesResults(res), nil } // LookUpBootscripts attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) ScalewayResolverResults { +func (c *ScalewayCache) LookUpBootscripts(needle string, acceptUUID bool) (ScalewayResolverResults, error) { c.Lock.Lock() defer c.Lock.Unlock() var res ScalewayResolverResults + var exactMatches ScalewayResolverResults if acceptUUID && anonuuid.IsUUID(needle) == nil { if fields, ok := c.Bootscripts[needle]; ok { - entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) - var exactMatches ScalewayResolverResults for identifier, fields := range c.Bootscripts { if 
fields[CacheTitle] == needle { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) exactMatches = append(exactMatches, entry) } if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } if len(exactMatches) == 1 { - return exactMatches + return exactMatches, nil } - return removeDuplicatesResults(res) + return removeDuplicatesResults(res), nil } // LookUpServers attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) ScalewayResolverResults { +func (c *ScalewayCache) LookUpServers(needle string, acceptUUID bool) (ScalewayResolverResults, error) { c.Lock.Lock() defer c.Lock.Unlock() var res ScalewayResolverResults + var exactMatches ScalewayResolverResults if acceptUUID && anonuuid.IsUUID(needle) == nil { if fields, ok := c.Servers[needle]; ok { - entry := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry, err := NewScalewayResolverResult(needle, fields[CacheTitle], fields[CacheArch], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } nameRegex := regexp.MustCompile(`(?i)` + regexp.MustCompile(`[_-]`).ReplaceAllString(needle, ".*")) - var exactMatches ScalewayResolverResults for identifier, fields := range c.Servers { if fields[CacheTitle] == needle { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) exactMatches = append(exactMatches, entry) } if strings.HasPrefix(identifier, needle) || nameRegex.MatchString(fields[CacheTitle]) { - entry := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + entry, err := NewScalewayResolverResult(identifier, fields[CacheTitle], fields[CacheArch], IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) res = append(res, entry) } } if len(exactMatches) == 1 { - return exactMatches + return exactMatches, nil } - return removeDuplicatesResults(res) + return removeDuplicatesResults(res), nil } // removeDuplicatesResults transforms an array into a unique array @@ -492,52 +542,86 @@ func parseNeedle(input string) (identifierType int, needle string) { } // LookUpIdentifiers attempts to return identifiers matching a pattern -func (c *ScalewayCache) LookUpIdentifiers(needle string) ScalewayResolverResults { +func (c *ScalewayCache) LookUpIdentifiers(needle string) (ScalewayResolverResults, error) { results := ScalewayResolverResults{} identifierType, needle := parseNeedle(needle) if identifierType&(IdentifierUnknown|IdentifierServer) > 0 { - for _, result := range 
c.LookUpServers(needle, false) { - entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierServer) + servers, err := c.LookUpServers(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range servers { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierServer) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) results = append(results, entry) } } if identifierType&(IdentifierUnknown|IdentifierImage) > 0 { - for _, result := range c.LookUpImages(needle, false) { - entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierImage) + images, err := c.LookUpImages(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range images { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierImage) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) results = append(results, entry) } } if identifierType&(IdentifierUnknown|IdentifierSnapshot) > 0 { - for _, result := range c.LookUpSnapshots(needle, false) { - entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierSnapshot) + snapshots, err := c.LookUpSnapshots(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range snapshots { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierSnapshot) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) results = append(results, entry) } } if identifierType&(IdentifierUnknown|IdentifierVolume) > 0 { - for _, result := range c.LookUpVolumes(needle, false) { - entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierVolume) + volumes, err := c.LookUpVolumes(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range volumes { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierVolume) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) results = append(results, entry) } } if identifierType&(IdentifierUnknown|IdentifierBootscript) > 0 { - for _, result := range c.LookUpBootscripts(needle, false) { - entry := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierBootscript) + bootscripts, err := c.LookUpBootscripts(needle, false) + if err != nil { + return ScalewayResolverResults{}, err + } + for _, result := range bootscripts { + entry, err := NewScalewayResolverResult(result.Identifier, result.Name, result.Arch, IdentifierBootscript) + if err != nil { + return ScalewayResolverResults{}, err + } entry.ComputeRankMatch(needle) results = append(results, entry) } } - - return results + return results, nil } // InsertServer registers a server in the cache diff --git a/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go index d14a59dcb..58ad93716 100644 --- a/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go +++ b/vendor/github.com/scaleway/scaleway-cli/pkg/api/logger.go @@ -32,18 +32,46 @@ type defaultLogger struct { } func (l *defaultLogger) LogHTTP(r *http.Request) { - l.Printf("%s %s\n", r.Method, r.URL.Path) + l.Printf("%s %s\n", r.Method, r.URL.RawPath) } + func (l *defaultLogger) 
Fatalf(format string, v ...interface{}) { l.Printf("[FATAL] %s\n", fmt.Sprintf(format, v)) os.Exit(1) } + func (l *defaultLogger) Debugf(format string, v ...interface{}) { l.Printf("[DEBUG] %s\n", fmt.Sprintf(format, v)) } + func (l *defaultLogger) Infof(format string, v ...interface{}) { l.Printf("[INFO ] %s\n", fmt.Sprintf(format, v)) } + func (l *defaultLogger) Warnf(format string, v ...interface{}) { l.Printf("[WARN ] %s\n", fmt.Sprintf(format, v)) } + +type disableLogger struct { +} + +// NewDisableLogger returns a logger which is configured to do nothing +func NewDisableLogger() Logger { + return &disableLogger{} +} + +func (d *disableLogger) LogHTTP(r *http.Request) { +} + +func (d *disableLogger) Fatalf(format string, v ...interface{}) { + panic(fmt.Sprintf(format, v)) +} + +func (d *disableLogger) Debugf(format string, v ...interface{}) { +} + +func (d *disableLogger) Infof(format string, v ...interface{}) { +} + +func (d *disableLogger) Warnf(format string, v ...interface{}) { +} diff --git a/website/source/docs/providers/scaleway/r/server.html.markdown b/website/source/docs/providers/scaleway/r/server.html.markdown index 0d6008bab..d03f70df5 100644 --- a/website/source/docs/providers/scaleway/r/server.html.markdown +++ b/website/source/docs/providers/scaleway/r/server.html.markdown @@ -28,11 +28,18 @@ The following arguments are supported: * `name` - (Required) name of ARM server * `image` - (Required) base image of ARM server * `type` - (Required) type of ARM server +* `bootscript` - (Optional) server bootscript +* `tags` - (Optional) list of tags for server +* `enable_ipv6` - (Optional) enable ipv6 +* `dynamic_ip_required` - (Optional) make server publicly available +* `security_group` - (Optional) assign security group to server -Field `name`, `type` are editable. +Field `name`, `type`, `tags`, `dynamic_ip_required`, `security_group` are editable. 
## Attributes Reference The following attributes are exported: * `id` - id of the new resource +* `private_ip` - private ip of the new resource +* `public_ip` - public ip of the new resource From 7de54533f901cd575bcf8a04382742c74348bdd4 Mon Sep 17 00:00:00 2001 From: Partha Dutta Date: Mon, 25 Jul 2016 13:25:36 +0100 Subject: [PATCH 0393/1238] Support greater than twenty db parameters (#7364) * aws_db_parameter_group: Support more than 20 parameters in a single update * create test to prove greater than 20 database parameters can be processed * update test to prove updating greater than 20 database parameters can be processed * Issues with certain key value database parameters Cannot create a passing test for database parameters "innodb_file_per_table" and "binlog_format" It seems that these parameters can be created and tested successfully BUT after the "parameter group" has been destroyed, it then makes a "DescribeDBParameterGroups" call This fails with a 404 error...makes sense since the group does not exist Have very little understanding of how the test framework works, so am struggling to debug Currently commented out to have a passing test * reorder create database parameter group dataset * reorder update database parameter group dataset * typo: excede => exceed * add one extra database parameter; now it is 41 in total * added test for additonal database parameter added in previous commit * remove commented out database parameters from test --- .../aws/resource_aws_db_parameter_group.go | 29 +- .../resource_aws_db_parameter_group_test.go | 301 ++++++++++++++++++ 2 files changed, 321 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go index 5f450a292..67ada3048 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group.go @@ -201,18 +201,29 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) } if len(parameters) > 0 { - modifyOpts := rds.ModifyDBParameterGroupInput{ - DBParameterGroupName: aws.String(d.Get("name").(string)), - Parameters: parameters, - } + // We can only modify 20 parameters at a time, so walk them until + // we've got them all. 
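		// (Editorial illustration, not part of the original diff.) With the
		// 41-parameter acceptance-test config added further below, this loop issues
		// three ModifyDBParameterGroup calls: two batches of 20 and a final batch of 1.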
+ maxParams := 20 + for parameters != nil { + paramsToModify := make([]*rds.Parameter, 0) + if len(parameters) <= maxParams { + paramsToModify, parameters = parameters[:], nil + } else { + paramsToModify, parameters = parameters[:maxParams], parameters[maxParams:] + } + modifyOpts := rds.ModifyDBParameterGroupInput{ + DBParameterGroupName: aws.String(d.Get("name").(string)), + Parameters: paramsToModify, + } - log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts) - _, err = rdsconn.ModifyDBParameterGroup(&modifyOpts) - if err != nil { - return fmt.Errorf("Error modifying DB Parameter Group: %s", err) + log.Printf("[DEBUG] Modify DB Parameter Group: %s", modifyOpts) + _, err = rdsconn.ModifyDBParameterGroup(&modifyOpts) + if err != nil { + return fmt.Errorf("Error modifying DB Parameter Group: %s", err) + } } + d.SetPartial("parameter") } - d.SetPartial("parameter") } if arn, err := buildRDSPGARN(d, meta); err == nil { diff --git a/builtin/providers/aws/resource_aws_db_parameter_group_test.go b/builtin/providers/aws/resource_aws_db_parameter_group_test.go index 26c0e574d..17a11c741 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group_test.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group_test.go @@ -14,6 +14,206 @@ import ( "github.com/hashicorp/terraform/terraform" ) +func TestAccAWSDBParameterGroup_limit(t *testing.T) { + var v rds.DBParameterGroup + + groupName := fmt.Sprintf("parameter-group-test-terraform-%d", acctest.RandInt()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBParameterGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: createAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), + testAccCheckAWSDBParameterGroupAttributes(&v, groupName), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "RDS default parameter group: Exceed default AWS parameter group limit of twenty"), + + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), + 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", "max_heap_table_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", "performance_schema_users_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", "table_open_cache"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", 
"character_set_connection"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.name", "character_set_database"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.name", "character_set_filesystem"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "on"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "barracuda"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "file"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", 
"parameter.2839701698.name", "query_cache_min_res_unit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "repeatable-read"), + ), + }, + resource.TestStep{ + Config: updateAwsDbParameterGroupsExceedDefaultAwsLimit(groupName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSDBParameterGroupExists("aws_db_parameter_group.large", &v), + testAccCheckAWSDBParameterGroupAttributes(&v, groupName), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "name", groupName), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "family", "mysql5.6"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "description", "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty"), + + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.name", "character_set_server"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2421266705.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.name", "character_set_client"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2478663599.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.name", "collation_server"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1680942586.value", "utf8_general_ci"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.name", "collation_connection"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2450940716.value", "utf8_general_ci"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.name", "join_buffer_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.242489837.value", "16777216"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.name", "key_buffer_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2026669454.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.name", "max_connections"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2705275319.value", "3200"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.name", "max_heap_table_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3512697936.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.name", "performance_schema"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.780730667.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.name", 
"performance_schema_users_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2020346918.value", "1048576"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.name", "query_cache_limit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1460834103.value", "2097152"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.name", "query_cache_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.484865451.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.name", "sort_buffer_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.255276438.value", "16777216"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.name", "table_open_cache"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2981725119.value", "4096"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.name", "tmp_table_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2703661820.value", "67108864"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.name", "binlog_cache_size"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2386583229.value", "131072"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.name", "innodb_flush_log_at_trx_commit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.4012389720.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.name", "innodb_open_files"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2688783017.value", "4000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.name", "innodb_read_io_threads"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.782983977.value", "64"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.name", "innodb_thread_concurrency"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2809980413.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.name", "innodb_write_io_threads"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3599115250.value", "64"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.name", "character_set_connection"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2557156277.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.name", "character_set_database"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2475346812.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.name", "character_set_filesystem"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1986528518.value", "utf8"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.name", "character_set_results"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1708034931.value", "utf8"), + 
resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.name", "event_scheduler"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1937131004.value", "on"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.name", "innodb_buffer_pool_dump_at_shutdown"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3437079877.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.name", "innodb_file_format"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1092112861.value", "barracuda"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.name", "innodb_io_capacity"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.615571931.value", "2000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.name", "innodb_io_capacity_max"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1065962799.value", "3000"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.name", "innodb_lock_wait_timeout"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1411161182.value", "120"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.name", "innodb_max_dirty_pages_pct"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.3133315879.value", "90"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.name", "log_bin_trust_function_creators"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.950177639.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.name", "log_warnings"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.591700516.value", "2"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.name", "log_output"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1918306725.value", "file"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.name", "max_allowed_packet"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.386204433.value", "1073741824"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.name", "max_connect_errors"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.1700901269.value", "100"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.name", "query_cache_min_res_unit"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.2839701698.value", "512"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.name", "slow_query_log"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.427634017.value", "1"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.name", "sync_binlog"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.881816039.value", "0"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.name", "tx_isolation"), + resource.TestCheckResourceAttr("aws_db_parameter_group.large", "parameter.748684209.value", "repeatable-read"), + ), + }, + }, 
+ }) +} + func TestAccAWSDBParameterGroup_basic(t *testing.T) { var v rds.DBParameterGroup @@ -311,3 +511,104 @@ resource "aws_db_parameter_group" "bar" { description = "Test parameter group for terraform" }`, n) } + +func createAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { + return fmt.Sprintf(` +resource "aws_db_parameter_group" "large" { + name = "%s" + family = "mysql5.6" + description = "RDS default parameter group: Exceed default AWS parameter group limit of twenty" + + parameter { name = "binlog_cache_size" value = 131072 } + parameter { name = "character_set_client" value = "utf8" } + parameter { name = "character_set_connection" value = "utf8" } + parameter { name = "character_set_database" value = "utf8" } + parameter { name = "character_set_filesystem" value = "utf8" } + parameter { name = "character_set_results" value = "utf8" } + parameter { name = "character_set_server" value = "utf8" } + parameter { name = "collation_connection" value = "utf8_general_ci" } + parameter { name = "collation_server" value = "utf8_general_ci" } + parameter { name = "event_scheduler" value = "ON" } + parameter { name = "innodb_buffer_pool_dump_at_shutdown" value = 1 } + parameter { name = "innodb_file_format" value = "Barracuda" } + parameter { name = "innodb_flush_log_at_trx_commit" value = 0 } + parameter { name = "innodb_io_capacity" value = 2000 } + parameter { name = "innodb_io_capacity_max" value = 3000 } + parameter { name = "innodb_lock_wait_timeout" value = 120 } + parameter { name = "innodb_max_dirty_pages_pct" value = 90 } + parameter { name = "innodb_open_files" value = 4000 apply_method = "pending-reboot" } + parameter { name = "innodb_read_io_threads" value = 64 apply_method = "pending-reboot" } + parameter { name = "innodb_thread_concurrency" value = 0 } + parameter { name = "innodb_write_io_threads" value = 64 apply_method = "pending-reboot" } + parameter { name = "join_buffer_size" value = 16777216 } + parameter { name = "key_buffer_size" value = 67108864 } + parameter { name = "log_bin_trust_function_creators" value = 1 } + parameter { name = "log_warnings" value = 2 } + parameter { name = "log_output" value = "FILE" } + parameter { name = "max_allowed_packet" value = 1073741824 } + parameter { name = "max_connect_errors" value = 100 } + parameter { name = "max_connections" value = 3200 } + parameter { name = "max_heap_table_size" value = 67108864 } + parameter { name = "performance_schema" value = 1 apply_method = "pending-reboot" } + parameter { name = "performance_schema_users_size" value = 1048576 apply_method = "pending-reboot" } + parameter { name = "query_cache_limit" value = 2097152 } + parameter { name = "query_cache_min_res_unit" value = 512 } + parameter { name = "query_cache_size" value = 67108864 } + parameter { name = "slow_query_log" value = 1 } + parameter { name = "sort_buffer_size" value = 16777216 } + parameter { name = "sync_binlog" value = 0 } + parameter { name = "table_open_cache" value = 4096 } + parameter { name = "tmp_table_size" value = 67108864 } + parameter { name = "tx_isolation" value = "REPEATABLE-READ" } +}`, n) +} + +func updateAwsDbParameterGroupsExceedDefaultAwsLimit(n string) string { + return fmt.Sprintf(` +resource "aws_db_parameter_group" "large" { + name = "%s" + family = "mysql5.6" + description = "Updated RDS default parameter group: Exceed default AWS parameter group limit of twenty" + parameter { name = "binlog_cache_size" value = 131072 } + parameter { name = "character_set_client" value = "utf8" } + parameter { name = 
"character_set_connection" value = "utf8" } + parameter { name = "character_set_database" value = "utf8" } + parameter { name = "character_set_filesystem" value = "utf8" } + parameter { name = "character_set_results" value = "utf8" } + parameter { name = "character_set_server" value = "utf8" } + parameter { name = "collation_connection" value = "utf8_general_ci" } + parameter { name = "collation_server" value = "utf8_general_ci" } + parameter { name = "event_scheduler" value = "ON" } + parameter { name = "innodb_buffer_pool_dump_at_shutdown" value = 1 } + parameter { name = "innodb_file_format" value = "Barracuda" } + parameter { name = "innodb_flush_log_at_trx_commit" value = 0 } + parameter { name = "innodb_io_capacity" value = 2000 } + parameter { name = "innodb_io_capacity_max" value = 3000 } + parameter { name = "innodb_lock_wait_timeout" value = 120 } + parameter { name = "innodb_max_dirty_pages_pct" value = 90 } + parameter { name = "innodb_open_files" value = 4000 apply_method = "pending-reboot" } + parameter { name = "innodb_read_io_threads" value = 64 apply_method = "pending-reboot" } + parameter { name = "innodb_thread_concurrency" value = 0 } + parameter { name = "innodb_write_io_threads" value = 64 apply_method = "pending-reboot" } + parameter { name = "join_buffer_size" value = 16777216 } + parameter { name = "key_buffer_size" value = 67108864 } + parameter { name = "log_bin_trust_function_creators" value = 1 } + parameter { name = "log_warnings" value = 2 } + parameter { name = "log_output" value = "FILE" } + parameter { name = "max_allowed_packet" value = 1073741824 } + parameter { name = "max_connect_errors" value = 100 } + parameter { name = "max_connections" value = 3200 } + parameter { name = "max_heap_table_size" value = 67108864 } + parameter { name = "performance_schema" value = 1 apply_method = "pending-reboot" } + parameter { name = "performance_schema_users_size" value = 1048576 apply_method = "pending-reboot" } + parameter { name = "query_cache_limit" value = 2097152 } + parameter { name = "query_cache_min_res_unit" value = 512 } + parameter { name = "query_cache_size" value = 67108864 } + parameter { name = "slow_query_log" value = 1 } + parameter { name = "sort_buffer_size" value = 16777216 } + parameter { name = "sync_binlog" value = 0 } + parameter { name = "table_open_cache" value = 4096 } + parameter { name = "tmp_table_size" value = 67108864 } + parameter { name = "tx_isolation" value = "REPEATABLE-READ" } +}`, n) +} From e711912fafce4ebd29896204f50f3ae531b68bb4 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 13:26:39 +0100 Subject: [PATCH 0394/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8dd699a49..71b8bb17e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -124,6 +124,7 @@ IMPROVEMENTS: * provider/aws: Support kms_key_id for `aws_rds_cluster` [GH-7662] * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] + * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 31b8cde45cdf61294c5b84d82d34e121f021b661 Mon Sep 17 
00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 15:31:00 +0100 Subject: [PATCH 0395/1238] provider/aws: Support Import `aws_dynamodb_table` (#7352) There were some changes required to the Read func to get this working. The initial set of tests showed the following: ``` testing.go:255: Step 1 error: ImportStateVerify attributes not equivalent. Difference is shown below. Top is actual, bottom is expected. (map[string]string) { } (map[string]string) (len=8) { (string) (len=8) "hash_key": (string) (len=16) "TestTableHashKey", (string) (len=23) "local_secondary_index.#": (string) (len=1) "1", (string) (len=36) "local_secondary_index.884610231.name": (string) (len=12) "TestTableLSI", (string) (len=52) "local_secondary_index.884610231.non_key_attributes.#": (string) (len=1) "0", (string) (len=47) "local_secondary_index.884610231.projection_type": (string) (len=3) "ALL", (string) (len=41) "local_secondary_index.884610231.range_key": (string) (len=15) "TestLSIRangeKey", (string) (len=4) "name": (string) (len=38) "TerraformTestTable-2710929679033484576", (string) (len=9) "range_key": (string) (len=17) "TestTableRangeKey" } ``` On investigation, this was telling me that `hash_key`, `range_key`, `name` and `local_secondary_index` were not being set on the Read func When they were being set, all looks as expected: ``` make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSDynamoDbTable_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDynamoDbTable_ -timeout 120m === RUN TestAccAWSDynamoDbTable_importBasic --- PASS: TestAccAWSDynamoDbTable_importBasic (20.39s) === RUN TestAccAWSDynamoDbTable_basic --- PASS: TestAccAWSDynamoDbTable_basic (39.99s) === RUN TestAccAWSDynamoDbTable_streamSpecification --- PASS: TestAccAWSDynamoDbTable_streamSpecification (50.44s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 110.841s ``` --- .../aws/import_aws_dynamodb_table_test.go | 28 +++++++++++++ .../aws/resource_aws_dynamodb_table.go | 41 +++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 builtin/providers/aws/import_aws_dynamodb_table_test.go diff --git a/builtin/providers/aws/import_aws_dynamodb_table_test.go b/builtin/providers/aws/import_aws_dynamodb_table_test.go new file mode 100644 index 000000000..49da4b0eb --- /dev/null +++ b/builtin/providers/aws/import_aws_dynamodb_table_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSDynamoDbTable_importBasic(t *testing.T) { + resourceName := "aws_dynamodb_table.basic-dynamodb-table" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDynamoDbTableDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDynamoDbConfigInitialState(), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_dynamodb_table.go b/builtin/providers/aws/resource_aws_dynamodb_table.go index 88c583853..04515b27f 100644 --- a/builtin/providers/aws/resource_aws_dynamodb_table.go +++ b/builtin/providers/aws/resource_aws_dynamodb_table.go @@ -34,6 +34,9 @@ func resourceAwsDynamoDbTable() *schema.Resource { Read: resourceAwsDynamoDbTableRead, Update: resourceAwsDynamoDbTableUpdate, Delete: resourceAwsDynamoDbTableDelete, 
+ Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "arn": &schema.Schema{ @@ -601,6 +604,44 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) erro } d.Set("attribute", attributes) + d.Set("name", table.TableName) + + for _, attribute := range table.KeySchema { + if *attribute.KeyType == "HASH" { + d.Set("hash_key", attribute.AttributeName) + } + + if *attribute.KeyType == "RANGE" { + d.Set("range_key", attribute.AttributeName) + } + } + + lsiList := make([]map[string]interface{}, 0, len(table.LocalSecondaryIndexes)) + for _, lsiObject := range table.LocalSecondaryIndexes { + lsi := map[string]interface{}{ + "name": *lsiObject.IndexName, + "projection_type": *lsiObject.Projection.ProjectionType, + } + + for _, attribute := range lsiObject.KeySchema { + + if *attribute.KeyType == "RANGE" { + lsi["range_key"] = *attribute.AttributeName + } + } + nkaList := make([]string, len(lsiObject.Projection.NonKeyAttributes)) + for _, nka := range lsiObject.Projection.NonKeyAttributes { + nkaList = append(nkaList, *nka) + } + lsi["non_key_attributes"] = nkaList + + lsiList = append(lsiList, lsi) + } + + err = d.Set("local_secondary_index", lsiList) + if err != nil { + return err + } gsiList := make([]map[string]interface{}, 0, len(table.GlobalSecondaryIndexes)) for _, gsiObject := range table.GlobalSecondaryIndexes { From 1c13cc994b593496a045437453d2280cf03384c4 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Mon, 25 Jul 2016 17:57:02 +0100 Subject: [PATCH 0396/1238] provider/azurerm: add option to delete VMs Data disks on termination (#7793) ``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMVirtualMachine_deleteVHD -timeout 120m === RUN TestAccAzureRMVirtualMachine_deleteVHDOptOut --- PASS: TestAccAzureRMVirtualMachine_deleteVHDOptOut (621.84s) === RUN TestAccAzureRMVirtualMachine_deleteVHDOptIn --- PASS: TestAccAzureRMVirtualMachine_deleteVHDOptIn (623.95s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 1245.930s ``` --- .../azurerm/resource_arm_virtual_machine.go | 52 +++++++++++++++---- .../resource_arm_virtual_machine_test.go | 36 +++++++++---- .../azurerm/r/virtual_machine.html.markdown | 1 + 3 files changed, 69 insertions(+), 20 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index b2121df8a..dfd9f6d90 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -206,6 +206,12 @@ func resourceArmVirtualMachine() *schema.Resource { }, }, + "delete_data_disks_on_termination": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "os_profile": { Type: schema.TypeSet, Required: true, @@ -561,19 +567,43 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e return err } - if deleteOsDisk := d.Get("delete_os_disk_on_termination").(bool); !deleteOsDisk { - log.Printf("[INFO] delete_os_disk_on_termination is false, skipping delete") - return nil + // delete OS Disk if opted in + if deleteOsDisk := d.Get("delete_os_disk_on_termination").(bool); deleteOsDisk { + log.Printf("[INFO] delete_os_disk_on_termination is enabled, deleting") + + osDisk, err := expandAzureRmVirtualMachineOsDisk(d) + if err != nil { + return fmt.Errorf("Error expanding OS Disk: %s", err) + } + + if err = resourceArmVirtualMachineDeleteVhd(*osDisk.Vhd.URI, resGroup, meta); err != nil 
{ + return fmt.Errorf("Error deleting OS Disk VHD: %s", err) + } } - osDisk, err := expandAzureRmVirtualMachineOsDisk(d) - if err != nil { - return fmt.Errorf("Error expanding OS Disk") + // delete Data disks if opted in + if deleteDataDisks := d.Get("delete_data_disks_on_termination").(bool); deleteDataDisks { + log.Printf("[INFO] delete_data_disks_on_termination is enabled, deleting each data disk") + + disks, err := expandAzureRmVirtualMachineDataDisk(d) + if err != nil { + return fmt.Errorf("Error expanding Data Disks: %s", err) + } + + for _, disk := range disks { + if err = resourceArmVirtualMachineDeleteVhd(*disk.Vhd.URI, resGroup, meta); err != nil { + return fmt.Errorf("Error deleting Data Disk VHD: %s", err) + } + } } - vhdURL, err := url.Parse(*osDisk.Vhd.URI) + return nil +} + +func resourceArmVirtualMachineDeleteVhd(uri, resGroup string, meta interface{}) error { + vhdURL, err := url.Parse(uri) if err != nil { - return fmt.Errorf("Cannot parse OS Disk VHD URI: %s", err) + return fmt.Errorf("Cannot parse Disk VHD URI: %s", err) } // VHD URI is in the form: https://storageAccountName.blob.core.windows.net/containerName/blobName @@ -582,12 +612,12 @@ func resourceArmVirtualMachineDelete(d *schema.ResourceData, meta interface{}) e containerName := path[0] blobName := path[1] - blobClient, storageAccountExists, err := meta.(*ArmClient).getBlobStorageClientForStorageAccount(id.ResourceGroup, storageAccountName) + blobClient, saExists, err := meta.(*ArmClient).getBlobStorageClientForStorageAccount(resGroup, storageAccountName) if err != nil { - return fmt.Errorf("Error creating blob store account for VHD deletion: %s", err) + return fmt.Errorf("Error creating blob store client for VHD deletion: %s", err) } - if !storageAccountExists { + if !saExists { log.Printf("[INFO] Storage Account %q doesn't exist so the VHD blob won't exist", storageAccountName) return nil } diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index 39a138577..91418877c 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -181,7 +181,7 @@ func TestAccAzureRMVirtualMachine_winRMConfig(t *testing.T) { func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { var vm compute.VirtualMachine ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachine, ri, ri, ri, ri, ri, ri, ri) + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_withDataDisk, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -196,7 +196,10 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { }, { Config: postConfig, - Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(true), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineVHDExistance("myosdisk1.vhd", true), + testCheckAzureRMVirtualMachineVHDExistance("mydatadisk1.vhd", true), + ), }, }, }) @@ -205,7 +208,7 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptOut(t *testing.T) { func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { var vm compute.VirtualMachine ri := acctest.RandInt() - preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk, ri, ri, ri, ri, ri, ri, ri) + preConfig := 
fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDestroyDisks, ri, ri, ri, ri, ri, ri, ri) postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_basicLinuxMachineDeleteVM, ri, ri, ri, ri, ri) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -220,7 +223,10 @@ func TestAccAzureRMVirtualMachine_deleteVHDOptIn(t *testing.T) { }, { Config: postConfig, - Check: testCheckAzureRMVirtualMachineOSDiskVHDExistance(false), + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineVHDExistance("myosdisk1.vhd", false), + testCheckAzureRMVirtualMachineVHDExistance("mydatadisk1.vhd", false), + ), }, }, }) @@ -322,7 +328,7 @@ func testCheckAzureRMVirtualMachineDestroy(s *terraform.State) error { return nil } -func testCheckAzureRMVirtualMachineOSDiskVHDExistance(shouldExist bool) resource.TestCheckFunc { +func testCheckAzureRMVirtualMachineVHDExistance(name string, shouldExist bool) resource.TestCheckFunc { return func(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "azurerm_storage_container" { @@ -338,13 +344,15 @@ func testCheckAzureRMVirtualMachineOSDiskVHDExistance(shouldExist bool) resource return fmt.Errorf("Error creating Blob storage client: %s", err) } - exists, err := storageClient.BlobExists(containerName, "myosdisk1.vhd") + exists, err := storageClient.BlobExists(containerName, name) if err != nil { - return fmt.Errorf("Error checking if OS Disk VHD Blob exists: %s", err) + return fmt.Errorf("Error checking if Disk VHD Blob exists: %s", err) } if exists && !shouldExist { - return fmt.Errorf("OS Disk VHD Blob still exists") + return fmt.Errorf("Disk VHD Blob still exists") + } else if !exists && shouldExist { + return fmt.Errorf("Disk VHD Blob should exist") } } @@ -529,7 +537,7 @@ resource "azurerm_virtual_machine" "test" { } ` -var testAccAzureRMVirtualMachine_basicLinuxMachineDestroyOSDisk = ` +var testAccAzureRMVirtualMachine_basicLinuxMachineDestroyDisks = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" location = "West US" @@ -602,6 +610,16 @@ resource "azurerm_virtual_machine" "test" { delete_os_disk_on_termination = true + storage_data_disk { + name = "mydatadisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/mydatadisk1.vhd" + disk_size_gb = "1023" + create_option = "Empty" + lun = 0 + } + + delete_data_disks_on_termination = true + os_profile { computer_name = "hostname%d" admin_username = "testadmin" diff --git a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown index 7f719337c..accf830dd 100644 --- a/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown +++ b/website/source/docs/providers/azurerm/r/virtual_machine.html.markdown @@ -212,6 +212,7 @@ The following arguments are supported: * `storage_os_disk` - (Required) A Storage OS Disk block as referenced below. * `delete_os_disk_on_termination` - (Optional) Flag to enable deletion of the OS Disk VHD blob when the VM is deleted, defaults to `false` * `storage_data_disk` - (Optional) A list of Storage Data disk blocks as referenced below. +* `delete_data_disks_on_termination` - (Optional) Flag to enable deletion of Storage Disk VHD blobs when the VM is deleted, defaults to `false` * `os_profile` - (Required) An OS Profile block as documented below. * `os_profile_windows_config` - (Required, when a windows machine) A Windows config block as documented below. 
* `os_profile_linux_config` - (Required, when a linux machine) A Linux config block as documented below. From f5e882eca66a9697642b40d4f12db8263c810c26 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 17:57:35 +0100 Subject: [PATCH 0397/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71b8bb17e..d99e45249 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -130,6 +130,7 @@ IMPROVEMENTS: * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` [GH-7434] * provider/azurerm: dump entire Request/Response in autorest Decorator [GH-7719] + * provider/azurerm: add option to delete VMs Data disks on termination [GH-7793] * provider/clc: Add support for hyperscale and bareMetal server types and package installation * provider/clc: Fix optional server password [GH-6414] * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` [GH-6898] From 3c702f2d75f611ccf9b69e20acd4cc08fc1c1661 Mon Sep 17 00:00:00 2001 From: Jonathan McCall Date: Mon, 25 Jul 2016 13:48:33 -0400 Subject: [PATCH 0398/1238] Ignore missing ENI attachment when trying to detach (#7185) --- builtin/providers/aws/resource_aws_network_interface.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_network_interface.go b/builtin/providers/aws/resource_aws_network_interface.go index ccfdbfc8e..5c9f8263e 100644 --- a/builtin/providers/aws/resource_aws_network_interface.go +++ b/builtin/providers/aws/resource_aws_network_interface.go @@ -196,7 +196,9 @@ func resourceAwsNetworkInterfaceDetach(oa *schema.Set, meta interface{}, eniId s conn := meta.(*AWSClient).ec2conn _, detach_err := conn.DetachNetworkInterface(detach_request) if detach_err != nil { - return fmt.Errorf("Error detaching ENI: %s", detach_err) + if awsErr, _ := detach_err.(awserr.Error); awsErr.Code() != "InvalidAttachmentID.NotFound" { + return fmt.Errorf("Error detaching ENI: %s", detach_err) + } } log.Printf("[DEBUG] Waiting for ENI (%s) to become dettached", eniId) From fcfb7f4e1b6e9157ab7e91fdd2467f785c0a9bbf Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 18:49:41 +0100 Subject: [PATCH 0399/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d99e45249..aff3abef9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -222,6 +222,7 @@ BUG FIXES: * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured [GH-7669] * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` [GH-7698] * provider/aws: Ignore IOPS on non io1 AWS root_block_device [GH-7783] + * provider/aws: Ignore missing ENI attachment when trying to detach ENI [GH-7185] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From afb06f907ff58f9a13345dfd48a40cda36e774a9 Mon Sep 17 00:00:00 2001 From: Zachary Salzbank Date: Mon, 25 Jul 2016 14:52:40 -0400 Subject: [PATCH 0400/1238] providers/aws: expose network interface id (#6751) Expose the network interface ID that is created with a new instance. 
This can be useful when associating an existing elastic IP to the default interface on an instance that has multiple network interfaces. --- builtin/providers/aws/resource_aws_instance.go | 7 +++++++ website/source/docs/providers/aws/r/instance.html.markdown | 1 + 2 files changed, 8 insertions(+) diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go index 892235965..ebd611109 100644 --- a/builtin/providers/aws/resource_aws_instance.go +++ b/builtin/providers/aws/resource_aws_instance.go @@ -128,6 +128,11 @@ func resourceAwsInstance() *schema.Resource { Computed: true, }, + "network_interface_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "public_ip": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -488,10 +493,12 @@ func resourceAwsInstanceRead(d *schema.ResourceData, meta interface{}) error { for _, ni := range instance.NetworkInterfaces { if *ni.Attachment.DeviceIndex == 0 { d.Set("subnet_id", ni.SubnetId) + d.Set("network_interface_id", ni.NetworkInterfaceId) } } } else { d.Set("subnet_id", instance.SubnetId) + d.Set("network_interface_id", "") } d.Set("ebs_optimized", instance.EbsOptimized) if instance.SubnetId != nil && *instance.SubnetId != "" { diff --git a/website/source/docs/providers/aws/r/instance.html.markdown b/website/source/docs/providers/aws/r/instance.html.markdown index 5681109b5..67d9ec85e 100644 --- a/website/source/docs/providers/aws/r/instance.html.markdown +++ b/website/source/docs/providers/aws/r/instance.html.markdown @@ -152,6 +152,7 @@ The following attributes are exported: * `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this is only available if you've enabled DNS hostnames for your VPC * `public_ip` - The public IP address assigned to the instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip`, as this field will change after the EIP is attached. +* `network_interface_id` - The ID of the network interface that was created with the instance. * `private_dns` - The private DNS name assigned to the instance. 
Can only be used inside the Amazon EC2, and only available if you've enabled DNS hostnames for your VPC From b4749f0c8fcf6e0368383a4455313836f50643a8 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 19:53:18 +0100 Subject: [PATCH 0401/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index aff3abef9..72558d038 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,6 +125,7 @@ IMPROVEMENTS: * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] + * providers/aws: expose network interface id [GH-6751] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From a61687add1644572f6e56bee1adf37265353833a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 25 Jul 2016 19:53:36 +0100 Subject: [PATCH 0402/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 72558d038..330c2dfa7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,7 +125,7 @@ IMPROVEMENTS: * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] - * providers/aws: expose network interface id [GH-6751] + * providers/aws: expose network interface id in `aws_instance` [GH-6751] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 8378333f6489dd0d7fed293439925ded69beab13 Mon Sep 17 00:00:00 2001 From: Mike Tougeron Date: Mon, 25 Jul 2016 13:41:24 -0700 Subject: [PATCH 0403/1238] Terraform 0.7.0 data resource for remote state does not use the 'output' path (#7802) --- website/source/docs/providers/terraform/d/remote_state.html.md | 2 +- website/source/docs/providers/terraform/index.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/terraform/d/remote_state.html.md b/website/source/docs/providers/terraform/d/remote_state.html.md index 683ba264e..5adc37170 100644 --- a/website/source/docs/providers/terraform/d/remote_state.html.md +++ b/website/source/docs/providers/terraform/d/remote_state.html.md @@ -22,7 +22,7 @@ data "terraform_remote_state" "vpc" { resource "aws_instance" "foo" { # ... - subnet_id = "${data.terraform_remote_state.vpc.output.subnet_id}" + subnet_id = "${data.terraform_remote_state.vpc.subnet_id}" } ``` diff --git a/website/source/docs/providers/terraform/index.html.markdown b/website/source/docs/providers/terraform/index.html.markdown index 20a9dfee3..aba805349 100644 --- a/website/source/docs/providers/terraform/index.html.markdown +++ b/website/source/docs/providers/terraform/index.html.markdown @@ -26,6 +26,6 @@ data "terraform_remote_state" "vpc" { resource "aws_instance" "foo" { # ... 
- subnet_id = "${data.terraform_remote_state.vpc.output.subnet_id}" + subnet_id = "${data.terraform_remote_state.vpc.subnet_id}" } ``` From 1249cb8ba89f1e25c2d7cdb6bc5b90dcba37180f Mon Sep 17 00:00:00 2001 From: Brad Feehan Date: Tue, 26 Jul 2016 16:35:52 +1000 Subject: [PATCH 0404/1238] Fix typo in aws_vpc resource docs (VPN -> VPC) (#7805) --- website/source/docs/providers/aws/r/vpc.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown index 0125ef3d2..d13ba043c 100644 --- a/website/source/docs/providers/aws/r/vpc.html.markdown +++ b/website/source/docs/providers/aws/r/vpc.html.markdown @@ -67,8 +67,8 @@ The following attributes are exported: ## Import -VPNs can be imported using the `vpn id`, e.g. +VPCs can be imported using the `vpc id`, e.g. ``` -$ terraform import aws_vpn.test_vpn vpc-a01106c2 -``` \ No newline at end of file +$ terraform import aws_vpc.test_vpc vpc-a01106c2 +``` From ba10720e5dad7f4a35966b5a1771bdc8d4b28279 Mon Sep 17 00:00:00 2001 From: Andy Chan Date: Tue, 26 Jul 2016 01:38:51 -0700 Subject: [PATCH 0405/1238] Adding passthrough behavior for API Gateway integration (#7801) --- .../resource_aws_api_gateway_integration.go | 23 +++++++++++++++---- ...source_aws_api_gateway_integration_test.go | 7 ++++++ builtin/providers/aws/validators.go | 9 ++++++++ .../r/api_gateway_integration.html.markdown | 1 + 4 files changed, 35 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration.go b/builtin/providers/aws/resource_aws_api_gateway_integration.go index 68f9c50bf..d82d78e6d 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration.go @@ -79,6 +79,12 @@ func resourceAwsApiGatewayIntegration() *schema.Resource { Type: schema.TypeString, Optional: true, }, + + "passthrough_behavior": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateApiGatewayIntegrationPassthroughBehavior, + }, }, } } @@ -106,6 +112,11 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa } } + var passthroughBehavior *string + if v, ok := d.GetOk("passthrough_behavior"); ok { + passthroughBehavior = aws.String(v.(string)) + } + var credentials *string if val, ok := d.GetOk("credentials"); ok { credentials = aws.String(val.(string)) @@ -119,11 +130,12 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa IntegrationHttpMethod: integrationHttpMethod, Uri: uri, // TODO reimplement once [GH-2143](https://github.com/hashicorp/terraform/issues/2143) has been implemented - RequestParameters: aws.StringMap(parameters), - RequestTemplates: aws.StringMap(templates), - Credentials: credentials, - CacheNamespace: nil, - CacheKeyParameters: nil, + RequestParameters: aws.StringMap(parameters), + RequestTemplates: aws.StringMap(templates), + Credentials: credentials, + CacheNamespace: nil, + CacheKeyParameters: nil, + PassthroughBehavior: passthroughBehavior, }) if err != nil { return fmt.Errorf("Error creating API Gateway Integration: %s", err) @@ -163,6 +175,7 @@ func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface d.Set("type", integration.Type) d.Set("uri", integration.Uri) d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters)) + d.Set("passthrough_behavior", integration.PassthroughBehavior) return nil 
} diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration_test.go b/builtin/providers/aws/resource_aws_api_gateway_integration_test.go index 090bcf0a9..b33497c1f 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration_test.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration_test.go @@ -34,6 +34,8 @@ func TestAccAWSAPIGatewayIntegration_basic(t *testing.T) { "aws_api_gateway_integration.test", "request_templates.application/json", ""), resource.TestCheckResourceAttr( "aws_api_gateway_integration.test", "request_templates.application/xml", "#set($inputRoot = $input.path('$'))\n{ }"), + resource.TestCheckResourceAttr( + "aws_api_gateway_integration.test", "passthrough_behavior", "WHEN_NO_MATCH"), ), }, @@ -48,6 +50,8 @@ func TestAccAWSAPIGatewayIntegration_basic(t *testing.T) { "aws_api_gateway_integration.test", "integration_http_method", ""), resource.TestCheckResourceAttr( "aws_api_gateway_integration.test", "uri", ""), + resource.TestCheckResourceAttr( + "aws_api_gateway_integration.test", "passthrough_behavior", "NEVER"), ), }, }, @@ -193,6 +197,7 @@ resource "aws_api_gateway_integration" "test" { type = "HTTP" uri = "https://www.google.de" integration_http_method = "GET" + passthrough_behavior = "WHEN_NO_MATCH" } ` @@ -230,5 +235,7 @@ resource "aws_api_gateway_integration" "test" { PARAMS type = "MOCK" + passthrough_behavior = "NEVER" + } ` diff --git a/builtin/providers/aws/validators.go b/builtin/providers/aws/validators.go index 234fe451e..75dd0d1d7 100644 --- a/builtin/providers/aws/validators.go +++ b/builtin/providers/aws/validators.go @@ -451,3 +451,12 @@ func validateDbEventSubscriptionName(v interface{}, k string) (ws []string, erro } return } + +func validateApiGatewayIntegrationPassthroughBehavior(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "WHEN_NO_MATCH" && value != "WHEN_NO_TEMPLATES" && value != "NEVER" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'WHEN_NO_MATCH', 'WHEN_NO_TEMPLATES', 'NEVER'", k)) + } + return +} diff --git a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown index b6704bb74..fc7224d30 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown @@ -56,6 +56,7 @@ The following arguments are supported: Not all methods are compatible with all `AWS` integrations. e.g. Lambda function [can only be invoked](https://github.com/awslabs/aws-apigateway-importer/issues/9#issuecomment-129651005) via `POST`. * `request_templates` - (Optional) A map of the integration's request templates. +* `passthrough_behavior` - (Optional) The integration passthrough behavior (`WHEN_NO_MATCH`, `WHEN_NO_TEMPLATES`, `NEVER`). **Required** if `request_templates` is used. * `request_parameters_in_json` - (Optional) A map written as a JSON string specifying the request query string parameters and headers that should be passed to the backend responder. 
From 33a3bf414bb966aa1a1cbc549351eb8d79412ea3 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 26 Jul 2016 09:39:42 +0100 Subject: [PATCH 0406/1238] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 330c2dfa7..11df71aee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -125,7 +125,8 @@ IMPROVEMENTS: * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] - * providers/aws: expose network interface id in `aws_instance` [GH-6751] + * provider/aws: expose network interface id in `aws_instance` [GH-6751] + * provider/aws: Adding passthrough behavior for API Gateway integration [GH-7801] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From cc912c39e56cd70e9a3d12eed6f84328ce7c11e8 Mon Sep 17 00:00:00 2001 From: Andreas Skarmutsos Lindh Date: Fri, 15 Jul 2016 13:54:36 +0200 Subject: [PATCH 0407/1238] AWS Application AutoScaling Initial work on two new resource types: * `aws_appautoscaling_target` * `aws_appautoscaling_policy` Fix acc tests --- builtin/providers/aws/config.go | 3 + builtin/providers/aws/provider.go | 2 + .../aws/resource_aws_appautoscaling_policy.go | 331 ++++++++++++++++++ ...resource_aws_appautoscaling_policy_test.go | 148 ++++++++ .../aws/resource_aws_appautoscaling_target.go | 202 +++++++++++ ...resource_aws_appautoscaling_target_test.go | 209 +++++++++++ .../aws/r/appautoscaling_policy.html.markdown | 74 ++++ .../aws/r/appautoscaling_target.html.markdown | 40 +++ 8 files changed, 1009 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_appautoscaling_policy.go create mode 100644 builtin/providers/aws/resource_aws_appautoscaling_policy_test.go create mode 100644 builtin/providers/aws/resource_aws_appautoscaling_target.go create mode 100644 builtin/providers/aws/resource_aws_appautoscaling_target_test.go create mode 100644 website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown create mode 100644 website/source/docs/providers/aws/r/appautoscaling_target.html.markdown diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 88bf4d0e7..434bdffdd 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -18,6 +18,7 @@ import ( "github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/apigateway" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" "github.com/aws/aws-sdk-go/service/autoscaling" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudfront" @@ -94,6 +95,7 @@ type AWSClient struct { emrconn *emr.EMR esconn *elasticsearch.ElasticsearchService apigateway *apigateway.APIGateway + appautoscalingconn *applicationautoscaling.ApplicationAutoScaling autoscalingconn *autoscaling.AutoScaling s3conn *s3.S3 sesConn *ses.SES @@ -213,6 +215,7 @@ func (c *Config) Client() (interface{}, error) { } client.apigateway = apigateway.New(sess) + client.appautoscalingconn = applicationautoscaling.New(sess) client.autoscalingconn = autoscaling.New(sess) client.cfconn = 
cloudformation.New(sess) client.cloudfrontconn = cloudfront.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 689335889..69e264dd9 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -135,6 +135,8 @@ func Provider() terraform.ResourceProvider { "aws_api_gateway_resource": resourceAwsApiGatewayResource(), "aws_api_gateway_rest_api": resourceAwsApiGatewayRestApi(), "aws_app_cookie_stickiness_policy": resourceAwsAppCookieStickinessPolicy(), + "aws_appautoscaling_target": resourceAwsAppautoscalingTarget(), + "aws_appautoscaling_policy": resourceAwsAppautoscalingPolicy(), "aws_autoscaling_group": resourceAwsAutoscalingGroup(), "aws_autoscaling_notification": resourceAwsAutoscalingNotification(), "aws_autoscaling_policy": resourceAwsAutoscalingPolicy(), diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy.go b/builtin/providers/aws/resource_aws_appautoscaling_policy.go new file mode 100644 index 000000000..44461e3a7 --- /dev/null +++ b/builtin/providers/aws/resource_aws_appautoscaling_policy.go @@ -0,0 +1,331 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsAppautoscalingPolicy() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsAppautoscalingPolicyCreate, + Read: resourceAwsAppautoscalingPolicyRead, + Update: resourceAwsAppautoscalingPolicyUpdate, + Delete: resourceAwsAppautoscalingPolicyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873 + value := v.(string) + if len(value) > 255 { + errors = append(errors, fmt.Errorf("q cannot be longer than 255 characters", k)) + } + return + }, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "policy_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "StepScaling", + }, + "resource_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "scalable_dimension": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "ecs:service:DesiredCount", + ForceNew: true, + }, + "service_namespace": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "ecs", + ForceNew: true, + }, + "adjustment_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "cooldown": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + "metric_aggregation_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "min_adjustment_magnitude": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "alarms": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "step_adjustment": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_interval_lower_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "metric_interval_upper_bound": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "scaling_adjustment": 
&schema.Schema{ + Type: schema.TypeInt, + Required: true, + }, + }, + }, + Set: resourceAwsAppautoscalingAdjustmentHash, + }, + }, + } +} + +func resourceAwsAppautoscalingPolicyCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + params, err := getAwsAppautoscalingPutScalingPolicyInput(d) + if err != nil { + return err + } + + log.Printf("[DEBUG] ApplicationAutoScaling PutScalingPolicy: %#v", params) + resp, err := conn.PutScalingPolicy(¶ms) + if err != nil { + return fmt.Errorf("Error putting scaling policy: %s", err) + } + + d.Set("arn", resp.PolicyARN) + d.SetId(d.Get("name").(string)) + log.Printf("[INFO] ApplicationAutoScaling scaling PolicyARN: %s", d.Get("arn").(string)) + + return resourceAwsAppautoscalingPolicyRead(d, meta) +} + +func resourceAwsAppautoscalingPolicyRead(d *schema.ResourceData, meta interface{}) error { + p, err := getAwsAppautoscalingPolicy(d, meta) + if err != nil { + return err + } + if p == nil { + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Read ApplicationAutoScaling policy: %s, SP: %s, Obj: %s", d.Get("name"), d.Get("name"), p) + + d.Set("arn", p.PolicyARN) + d.Set("name", p.PolicyName) + d.Set("policy_type", p.PolicyType) + d.Set("resource_id", p.ResourceId) + d.Set("scalable_dimension", p.ScalableDimension) + d.Set("service_namespace", p.ServiceNamespace) + d.Set("alarms", p.Alarms) + d.Set("step_scaling_policy_configuration", p.StepScalingPolicyConfiguration) + + return nil +} + +func resourceAwsAppautoscalingPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + params, inputErr := getAwsAppautoscalingPutScalingPolicyInput(d) + if inputErr != nil { + return inputErr + } + + log.Printf("[DEBUG] Application Autoscaling Update Scaling Policy: %#v", params) + _, err := conn.PutScalingPolicy(¶ms) + if err != nil { + return err + } + + return resourceAwsAppautoscalingPolicyRead(d, meta) +} + +func resourceAwsAppautoscalingPolicyDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + p, err := getAwsAppautoscalingPolicy(d, meta) + if err != nil { + return fmt.Errorf("Error getting policy: %s", err) + } + if p == nil { + return nil + } + + params := applicationautoscaling.DeleteScalingPolicyInput{ + PolicyName: aws.String(d.Get("name").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + ScalableDimension: aws.String(d.Get("scalable_dimension").(string)), + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + log.Printf("[DEBUG] Deleting Application AutoScaling Policy opts: %#v", params) + if _, err := conn.DeleteScalingPolicy(¶ms); err != nil { + return fmt.Errorf("Application AutoScaling Policy: %s", err) + } + + d.SetId("") + return nil +} + +// Takes the result of flatmap.Expand for an array of step adjustments and +// returns a []*applicationautoscaling.StepAdjustment. +func expandAppautoscalingStepAdjustments(configured []interface{}) ([]*applicationautoscaling.StepAdjustment, error) { + var adjustments []*applicationautoscaling.StepAdjustment + + // Loop over our configured step adjustments and create an array + // of aws-sdk-go compatible objects. We're forced to convert strings + // to floats here because there's no way to detect whether or not + // an uninitialized, optional schema element is "0.0" deliberately. + // With strings, we can test for "", which is definitely an empty + // struct value. 
+ for _, raw := range configured { + data := raw.(map[string]interface{}) + a := &applicationautoscaling.StepAdjustment{ + ScalingAdjustment: aws.Int64(int64(data["scaling_adjustment"].(int))), + } + if data["metric_interval_lower_bound"] != "" { + bound := data["metric_interval_lower_bound"] + switch bound := bound.(type) { + case string: + f, err := strconv.ParseFloat(bound, 64) + if err != nil { + return nil, fmt.Errorf( + "metric_interval_lower_bound must be a float value represented as a string") + } + a.MetricIntervalLowerBound = aws.Float64(f) + default: + return nil, fmt.Errorf( + "metric_interval_lower_bound isn't a string. This is a bug. Please file an issue.") + } + } + if data["metric_interval_upper_bound"] != "" { + bound := data["metric_interval_upper_bound"] + switch bound := bound.(type) { + case string: + f, err := strconv.ParseFloat(bound, 64) + if err != nil { + return nil, fmt.Errorf( + "metric_interval_upper_bound must be a float value represented as a string") + } + a.MetricIntervalUpperBound = aws.Float64(f) + default: + return nil, fmt.Errorf( + "metric_interval_upper_bound isn't a string. This is a bug. Please file an issue.") + } + } + adjustments = append(adjustments, a) + } + + return adjustments, nil +} + +func getAwsAppautoscalingPutScalingPolicyInput(d *schema.ResourceData) (applicationautoscaling.PutScalingPolicyInput, error) { + var params = applicationautoscaling.PutScalingPolicyInput{ + PolicyName: aws.String(d.Get("name").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + } + + if v, ok := d.GetOk("policy_type"); ok { + params.PolicyType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("service_namespace"); ok { + params.ServiceNamespace = aws.String(v.(string)) + } + + if v, ok := d.GetOk("policy_type"); ok { + params.PolicyType = aws.String(v.(string)) + } + + if v, ok := d.GetOk("scalable_dimension"); ok { + params.ScalableDimension = aws.String(v.(string)) + } + + var adjustmentSteps []*applicationautoscaling.StepAdjustment + if v, ok := d.GetOk("step_adjustment"); ok { + steps, err := expandAppautoscalingStepAdjustments(v.(*schema.Set).List()) + if err != nil { + return params, fmt.Errorf("metric_interval_lower_bound and metric_interval_upper_bound must be strings!") + } + adjustmentSteps = steps + } + + // build StepScalingPolicyConfiguration + params.StepScalingPolicyConfiguration = &applicationautoscaling.StepScalingPolicyConfiguration{ + AdjustmentType: aws.String(d.Get("adjustment_type").(string)), + Cooldown: aws.Int64(int64(d.Get("cooldown").(int))), + MetricAggregationType: aws.String(d.Get("metric_aggregation_type").(string)), + StepAdjustments: adjustmentSteps, + } + + if v, ok := d.GetOk("min_adjustment_magnitude"); ok { + params.StepScalingPolicyConfiguration.MinAdjustmentMagnitude = aws.Int64(int64(v.(int))) + } + + return params, nil +} + +func getAwsAppautoscalingPolicy(d *schema.ResourceData, meta interface{}) (*applicationautoscaling.ScalingPolicy, error) { + conn := meta.(*AWSClient).appautoscalingconn + + params := applicationautoscaling.DescribeScalingPoliciesInput{ + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + + log.Printf("[DEBUG] Application AutoScaling Policy Describe Params: %#v", params) + resp, err := conn.DescribeScalingPolicies(¶ms) + if err != nil { + return nil, fmt.Errorf("Error retrieving scaling policies: %s", err) + } + + // find scaling policy + name := d.Get("name") + for idx, sp := range 
resp.ScalingPolicies { + if *sp.PolicyName == name { + return resp.ScalingPolicies[idx], nil + } + } + + // policy not found + return nil, nil +} + +func resourceAwsAppautoscalingAdjustmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if v, ok := m["metric_interval_lower_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + if v, ok := m["metric_interval_upper_bound"]; ok { + buf.WriteString(fmt.Sprintf("%f-", v)) + } + buf.WriteString(fmt.Sprintf("%d-", m["scaling_adjustment"].(int))) + + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go new file mode 100644 index 000000000..cef3d7d74 --- /dev/null +++ b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go @@ -0,0 +1,148 @@ +package aws + +import ( + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSAppautoscalingPolicy_basic(t *testing.T) { + var policy applicationautoscaling.ScalingPolicy + var awsAccountId = os.Getenv("AWS_ACCOUNT_ID") + + randClusterName := fmt.Sprintf("cluster-%s", acctest.RandString(10)) + // randResourceId := fmt.Sprintf("service/%s/%s", randClusterName, acctest.RandString(10)) + randPolicyName := fmt.Sprintf("terraform-test-foobar-%s", acctest.RandString(5)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAppautoscalingPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAppautoscalingPolicyConfig(randClusterName, randPolicyName, awsAccountId), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAppautoscalingPolicyExists("aws_appautoscaling_policy.foobar_simple", &policy), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "adjustment_type", "ChangeInCapacity"), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "policy_type", "StepScaling"), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "cooldown", "60"), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "name", randPolicyName), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "resource_id", fmt.Sprintf("service/%s/foobar", randClusterName)), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "service_namespace", "ecs"), + resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "scalable_dimension", "ecs:service:DesiredCount"), + ), + }, + }, + }) +} + +func testAccCheckAWSAppautoscalingPolicyExists(n string, policy *applicationautoscaling.ScalingPolicy) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn + params := &applicationautoscaling.DescribeScalingPoliciesInput{ + ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), + PolicyNames: []*string{aws.String(rs.Primary.ID)}, + } + resp, err := conn.DescribeScalingPolicies(params) + if err != nil { + return err + } + if len(resp.ScalingPolicies) == 0 { + return fmt.Errorf("ScalingPolicy %s 
not found", rs.Primary.ID) + } + + return nil + } +} + +func testAccCheckAWSAppautoscalingPolicyDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn + + for _, rs := range s.RootModule().Resources { + params := applicationautoscaling.DescribeScalingPoliciesInput{ + ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), + PolicyNames: []*string{aws.String(rs.Primary.ID)}, + } + + resp, err := conn.DescribeScalingPolicies(¶ms) + + if err == nil { + if len(resp.ScalingPolicies) != 0 && + *resp.ScalingPolicies[0].PolicyName == rs.Primary.ID { + return fmt.Errorf("Application autoscaling policy still exists: %s", rs.Primary.ID) + } + } + } + + return nil +} + +func testAccAWSAppautoscalingPolicyConfig( + randClusterName string, + randPolicyName string, + awsAccountId string) string { + return fmt.Sprintf(` +resource "aws_ecs_cluster" "foo" { + name = "%s" +} + +resource "aws_ecs_task_definition" "task" { + family = "foobar" + container_definitions = < 255 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 255 characters", k)) + } + return + }, + }, + "arn": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "max_capacity": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "min_capacity": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "resource_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "scalable_dimension": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "ecs:service:DesiredCount", + ForceNew: true, + }, + "service_namespace": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "ecs", + ForceNew: true, + }, + }, + } +} + +func resourceAwsAppautoscalingTargetCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + var targetOpts applicationautoscaling.RegisterScalableTargetInput + + targetOpts.MaxCapacity = aws.Int64(int64(d.Get("max_capacity").(int))) + targetOpts.MinCapacity = aws.Int64(int64(d.Get("min_capacity").(int))) + targetOpts.ResourceId = aws.String(d.Get("resource_id").(string)) + targetOpts.RoleARN = aws.String(d.Get("role_arn").(string)) + targetOpts.ScalableDimension = aws.String(d.Get("scalable_dimension").(string)) + targetOpts.ServiceNamespace = aws.String(d.Get("service_namespace").(string)) + + log.Printf("[DEBUG] Application autoscaling target create configuration %#v", targetOpts) + _, err := conn.RegisterScalableTarget(&targetOpts) + if err != nil { + return fmt.Errorf("Error creating application autoscaling target: %s", err) + } + + d.SetId(d.Get("resource_id").(string)) + log.Printf("[INFO] Application AutoScaling Target ID: %s", d.Id()) + + return resourceAwsAppautoscalingTargetRead(d, meta) +} + +func resourceAwsAppautoscalingTargetRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + t, err := getAwsAppautoscalingTarget(d, conn) + if err != nil { + return err + } + if t == nil { + log.Printf("[INFO] Application AutoScaling Target %q not found", d.Id()) + d.SetId("") + return nil + } + + d.Set("max_capacity", t.MaxCapacity) + d.Set("min_capacity", t.MinCapacity) + d.Set("resource_id", t.ResourceId) + d.Set("role_arn", t.RoleARN) + d.Set("scalable_dimension", t.ScalableDimension) + d.Set("service_namespace", 
t.ServiceNamespace) + + return nil +} + +// Updating Target is not supported +// func getAwsAppautoscalingTargetUpdate(d *schema.ResourceData, meta interface{}) error { +// conn := meta.(*AWSClient).appautoscalingconn + +// } + +func resourceAwsAppautoscalingTargetDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).appautoscalingconn + + t, err := getAwsAppautoscalingTarget(d, conn) + if err != nil { + return err + } + if t == nil { + log.Printf("[INFO] Application AutoScaling Target %q not found", d.Id()) + d.SetId("") + return nil + } + + log.Printf("[DEBUG] Application AutoScaling Target destroy: %#v", d.Id()) + deleteOpts := applicationautoscaling.DeregisterScalableTargetInput{ + ResourceId: aws.String(d.Get("resource_id").(string)), + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + ScalableDimension: aws.String(d.Get("scalable_dimension").(string)), + } + + err = resource.Retry(5*time.Minute, func() *resource.RetryError { + if _, err := conn.DeregisterScalableTarget(&deleteOpts); err != nil { + if awserr, ok := err.(awserr.Error); ok { + // @TODO: We should do stuff here depending on the actual error returned + return resource.RetryableError(awserr) + } + // Non recognized error, no retry. + return resource.NonRetryableError(err) + } + // Successful delete + return nil + }) + if err != nil { + return err + } + + return resource.Retry(5*time.Minute, func() *resource.RetryError { + if t, _ = getAwsAppautoscalingTarget(d, conn); t != nil { + return resource.RetryableError( + fmt.Errorf("Application AutoScaling Target still exists")) + } + return nil + }) +} + +func getAwsAppautoscalingTarget( + d *schema.ResourceData, + conn *applicationautoscaling.ApplicationAutoScaling) (*applicationautoscaling.ScalableTarget, error) { + + tgtName := d.Id() + describeOpts := applicationautoscaling.DescribeScalableTargetsInput{ + ResourceIds: []*string{aws.String(tgtName)}, + ServiceNamespace: aws.String(d.Get("service_namespace").(string)), + } + + log.Printf("[DEBUG] Application AutoScaling Target describe configuration: %#v", describeOpts) + describeTargets, err := conn.DescribeScalableTargets(&describeOpts) + if err != nil { + // @TODO: We should probably send something else back if we're trying to access an unknown Resource ID + // targetserr, ok := err.(awserr.Error) + // if ok && targetserr.Code() == "" + return nil, fmt.Errorf("Error retrieving Application AutoScaling Target: %s", err) + } + + for idx, tgt := range describeTargets.ScalableTargets { + if *tgt.ResourceId == tgtName { + return describeTargets.ScalableTargets[idx], nil + } + } + + return nil, nil +} diff --git a/builtin/providers/aws/resource_aws_appautoscaling_target_test.go b/builtin/providers/aws/resource_aws_appautoscaling_target_test.go new file mode 100644 index 000000000..c53262336 --- /dev/null +++ b/builtin/providers/aws/resource_aws_appautoscaling_target_test.go @@ -0,0 +1,209 @@ +package aws + +import ( + "fmt" + "os" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/applicationautoscaling" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSAppautoScalingTarget_basic(t *testing.T) { + var target applicationautoscaling.ScalableTarget + var awsAccountId = os.Getenv("AWS_ACCOUNT_ID") + + randClusterName := fmt.Sprintf("cluster-%s", acctest.RandString(10)) + randResourceId := 
fmt.Sprintf("service/%s/%s", randClusterName, acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_appautoscaling_target.bar", + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSAppautoscalingTargetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSAppautoscalingTargetConfig(randClusterName, randResourceId, awsAccountId), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAppautoscalingTargetExists("aws_appautoscaling_target.bar", &target), + testAccCheckAWSAppautoscalingTargetAttributes(&target, randResourceId), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "service_namespace", "ecs"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "resource_id", fmt.Sprintf("service/%s/foobar", randClusterName)), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "scalable_dimension", "ecs:service:DesiredCount"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "min_capacity", "1"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "max_capacity", "3"), + ), + }, + + resource.TestStep{ + Config: testAccAWSAppautoscalingTargetConfigUpdate(randClusterName, randResourceId, awsAccountId), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSAppautoscalingTargetExists("aws_appautoscaling_target.bar", &target), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "min_capacity", "3"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "max_capacity", "6"), + ), + }, + }, + }) +} + +func testAccCheckAWSAppautoscalingTargetDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_appautoscaling_target" { + continue + } + + // Try to find the target + describeTargets, err := conn.DescribeScalableTargets( + &applicationautoscaling.DescribeScalableTargetsInput{ + ResourceIds: []*string{aws.String(rs.Primary.ID)}, + }, + ) + + if err == nil { + if len(describeTargets.ScalableTargets) != 0 && + *describeTargets.ScalableTargets[0].ResourceId == rs.Primary.ID { + return fmt.Errorf("Application AutoScaling Target still exists") + } + } + + // Verify error + e, ok := err.(awserr.Error) + if !ok { + return err + } + if e.Code() != "" { + return e + } + } + + return nil +} + +func testAccCheckAWSAppautoscalingTargetExists(n string, target *applicationautoscaling.ScalableTarget) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Application AutoScaling Target ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).appautoscalingconn + + describeTargets, err := conn.DescribeScalableTargets( + &applicationautoscaling.DescribeScalableTargetsInput{ + ResourceIds: []*string{aws.String(rs.Primary.ID)}, + }, + ) + + if err != nil { + return err + } + + if len(describeTargets.ScalableTargets) != 1 || + *describeTargets.ScalableTargets[0].ResourceId != rs.Primary.ID { + return fmt.Errorf("Application AutoScaling ResourceId not found") + } + + *target = *describeTargets.ScalableTargets[0] + + return nil + } +} + +func testAccCheckAWSAppautoscalingTargetAttributes(target *applicationautoscaling.ScalableTarget, resourceId string) resource.TestCheckFunc { + return nil +} + +func testAccAWSAppautoscalingTargetConfig( + 
randClusterName string, + randResourceId string, + awsAccountId string) string { + return fmt.Sprintf(` +resource "aws_ecs_cluster" "foo" { + name = "%s" +} +resource "aws_ecs_task_definition" "task" { + family = "foobar" + container_definitions = < Date: Sat, 16 Jul 2016 15:21:45 +0200 Subject: [PATCH 0408/1238] added applicationautoscaling from aws-sdk-go using: `govendor add github.com/aws/aws-sdk-go/service/applicationautoscaling@v1.2.5` introduce a retry for scalable target creation Due to possible inconsistencies in IAM, let's retry creation of the scalable target before we fail. Added IAM role as part of acceptance test --- ...resource_aws_appautoscaling_policy_test.go | 42 +- .../aws/resource_aws_appautoscaling_target.go | 16 +- ...resource_aws_appautoscaling_target_test.go | 124 +- .../service/applicationautoscaling/api.go | 1450 +++++++++++++++++ .../service/applicationautoscaling/service.go | 112 ++ vendor/vendor.json | 6 + 6 files changed, 1729 insertions(+), 21 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go index cef3d7d74..62d6d777d 100644 --- a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go +++ b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go @@ -2,7 +2,6 @@ package aws import ( "fmt" - "os" "testing" "github.com/aws/aws-sdk-go/aws" @@ -14,9 +13,8 @@ import ( func TestAccAWSAppautoscalingPolicy_basic(t *testing.T) { var policy applicationautoscaling.ScalingPolicy - var awsAccountId = os.Getenv("AWS_ACCOUNT_ID") - randClusterName := fmt.Sprintf("cluster-%s", acctest.RandString(10)) + randClusterName := fmt.Sprintf("cluster%s", acctest.RandString(10)) // randResourceId := fmt.Sprintf("service/%s/%s", randClusterName, acctest.RandString(10)) randPolicyName := fmt.Sprintf("terraform-test-foobar-%s", acctest.RandString(5)) @@ -26,7 +24,7 @@ func TestAccAWSAppautoscalingPolicy_basic(t *testing.T) { CheckDestroy: testAccCheckAWSAppautoscalingPolicyDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSAppautoscalingPolicyConfig(randClusterName, randPolicyName, awsAccountId), + Config: testAccAWSAppautoscalingPolicyConfig(randClusterName, randPolicyName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAppautoscalingPolicyExists("aws_appautoscaling_policy.foobar_simple", &policy), resource.TestCheckResourceAttr("aws_appautoscaling_policy.foobar_simple", "adjustment_type", "ChangeInCapacity"), @@ -90,9 +88,37 @@ func testAccCheckAWSAppautoscalingPolicyDestroy(s *terraform.State) error { func testAccAWSAppautoscalingPolicyConfig( randClusterName string, - randPolicyName string, - awsAccountId string) string { + randPolicyName string) string { return fmt.Sprintf(` +resource "aws_iam_role" "autoscale_role" { + name = "%s" + path = "/" + + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"*\"},\"Action\":[\"sts:AssumeRole\"]}]}" +} + +resource "aws_iam_role_policy" "autoscale_role_policy" { + name = "%s" + role = "${aws_iam_role.autoscale_role.id}" + + policy = < 0 { + return invalidParams + } + return nil +} + +type DeleteScalingPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteScalingPolicyOutput) String() 
string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteScalingPolicyOutput) GoString() string { + return s.String() +} + +type DeregisterScalableTargetInput struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DeregisterScalableTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterScalableTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeregisterScalableTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeregisterScalableTargetInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeregisterScalableTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeregisterScalableTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeregisterScalableTargetOutput) GoString() string { + return s.String() +} + +type DescribeScalableTargetsInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scalable target results returned by DescribeScalableTargets + // in paginated output. When this parameter is used, DescribeScalableTargets + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalableTargets request with the returned NextToken + // value. This value can be between 1 and 50. If this parameter is not used, + // then DescribeScalableTargets returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalableTargets + // request. Pagination continues from the end of the previous results that returned + // the NextToken value. 
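// A hedged sketch (not part of this change) of paging through every scalable
// target in a namespace with the MaxResults and NextToken fields described
// here, assuming the aws and applicationautoscaling imports used elsewhere in
// the provider code: the loop keeps requesting until the service stops
// returning a NextToken.
func listAllScalableTargets(conn *applicationautoscaling.ApplicationAutoScaling) ([]*applicationautoscaling.ScalableTarget, error) {
	var targets []*applicationautoscaling.ScalableTarget
	input := &applicationautoscaling.DescribeScalableTargetsInput{
		ServiceNamespace: aws.String("ecs"),
		MaxResults:       aws.Int64(50),
	}
	for {
		out, err := conn.DescribeScalableTargets(input)
		if err != nil {
			return nil, err
		}
		targets = append(targets, out.ScalableTargets...)
		if out.NextToken == nil {
			return targets, nil
		}
		input.NextToken = out.NextToken
	}
}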
This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceIds []*string `type:"list"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. If you specify a scalable dimension, you must also specify a + // resource ID. + ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalableTargetsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalableTargetsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalableTargetsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalableTargetsInput"} + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalableTargetsOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalableTargets request. + // When the results of a DescribeScalableTargets request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // The list of scalable targets that matches the request parameters. + ScalableTargets []*ScalableTarget `type:"list"` +} + +// String returns the string representation +func (s DescribeScalableTargetsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalableTargetsOutput) GoString() string { + return s.String() +} + +type DescribeScalingActivitiesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scaling activity results returned by DescribeScalingActivities + // in paginated output. When this parameter is used, DescribeScalingActivities + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalingActivities request with the returned NextToken + // value. This value can be between 1 and 50. If this parameter is not used, + // then DescribeScalingActivities returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalingActivities + // request. 
Pagination continues from the end of the previous results that returned + // the NextToken value. This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The unique identifier string for the resource associated with the scaling + // activity. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceId *string `min:"1" type:"string"` + + // The scalable dimension associated with the scaling activity. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. If you specify a scalable dimension, you must also specify a + // resource ID. + ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling activity is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalingActivitiesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingActivitiesInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalingActivitiesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalingActivities request. + // When the results of a DescribeScalingActivities request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // A list of scaling activity objects. + ScalingActivities []*ScalingActivity `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingActivitiesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingActivitiesOutput) GoString() string { + return s.String() +} + +type DescribeScalingPoliciesInput struct { + _ struct{} `type:"structure"` + + // The maximum number of scaling policy results returned by DescribeScalingPolicies + // in paginated output. When this parameter is used, DescribeScalingPolicies + // returns up to MaxResults results in a single page along with a NextToken + // response element. The remaining results of the initial request can be seen + // by sending another DescribeScalingPolicies request with the returned NextToken + // value. This value can be between 1 and 50. 
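// A hedged sketch of using DescribeScalingActivities to inspect why a resource
// was scaled; the resource ID below is only an example value taken from the
// comments above. Each activity carries a Cause, a Description and a
// StatusCode, which is usually enough to see which alarm fired and whether the
// adjustment succeeded.
func printRecentScalingActivities(conn *applicationautoscaling.ApplicationAutoScaling) error {
	out, err := conn.DescribeScalingActivities(&applicationautoscaling.DescribeScalingActivitiesInput{
		ServiceNamespace: aws.String("ecs"),
		ResourceId:       aws.String("service/default/sample-webapp"),
	})
	if err != nil {
		return err
	}
	for _, activity := range out.ScalingActivities {
		log.Printf("[DEBUG] %s: %s (%s)", *activity.StatusCode, *activity.Description, *activity.Cause)
	}
	return nil
}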
If this parameter is not used, + // then DescribeScalingPolicies returns up to 50 results and a NextToken value, + // if applicable. + MaxResults *int64 `type:"integer"` + + // The NextToken value returned from a previous paginated DescribeScalingPolicies + // request. Pagination continues from the end of the previous results that returned + // the NextToken value. This value is null when there are no more results to + // return. + NextToken *string `type:"string"` + + // The names of the scaling policies to describe. + PolicyNames []*string `type:"list"` + + // The unique resource identifier string of the scalable target that the scaling + // policy is associated with. For Amazon ECS services, this value is the resource + // type, followed by the cluster name and service name, such as service/default/sample-webapp. + // If you specify a scalable dimension, you must also specify a resource ID. + ResourceId *string `min:"1" type:"string"` + + // The scalable dimension of the scalable target that the scaling policy is + // associated with. The scalable dimension contains the service namespace, resource + // type, and scaling property, such as ecs:service:DesiredCount for the desired + // task count of an Amazon ECS service. If you specify a scalable dimension, + // you must also specify a resource ID. + ScalableDimension *string `type:"string" enum:"ScalableDimension"` + + // The AWS service namespace of the scalable target that the scaling policy + // is associated with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeScalingPoliciesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"} + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DescribeScalingPoliciesOutput struct { + _ struct{} `type:"structure"` + + // The NextToken value to include in a future DescribeScalingPolicies request. + // When the results of a DescribeScalingPolicies request exceed MaxResults, + // this value can be used to retrieve the next page of results. This value is + // null when there are no more results to return. + NextToken *string `type:"string"` + + // A list of scaling policy objects. + ScalingPolicies []*ScalingPolicy `type:"list"` +} + +// String returns the string representation +func (s DescribeScalingPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScalingPoliciesOutput) GoString() string { + return s.String() +} + +type PutScalingPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. 
This parameter is required if you are creating a new policy. + PolicyType *string `type:"string" enum:"PolicyType"` + + // The unique resource identifier string for the scalable target that this scaling + // policy applies to. For Amazon ECS services, this value is the resource type, + // followed by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension of the scalable target that this scaling policy applies + // to. The scalable dimension contains the service namespace, resource type, + // and scaling property, such as ecs:service:DesiredCount for the desired task + // count of an Amazon ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The AWS service namespace of the scalable target that this scaling policy + // applies to. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The configuration for the step scaling policy. This parameter is required + // if you are creating a new policy. For more information, see StepScalingPolicyConfiguration + // and StepAdjustment. + StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` +} + +// String returns the string representation +func (s PutScalingPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutScalingPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} + if s.PolicyName == nil { + invalidParams.Add(request.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) + } + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + if s.StepScalingPolicyConfiguration != nil { + if err := s.StepScalingPolicyConfiguration.Validate(); err != nil { + invalidParams.AddNested("StepScalingPolicyConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type PutScalingPolicyOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the resulting scaling policy. + PolicyARN *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutScalingPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutScalingPolicyOutput) GoString() string { + return s.String() +} + +type RegisterScalableTargetInput struct { + _ struct{} `type:"structure"` + + // The maximum value for this scalable target to scale out to in response to + // scaling activities. 
This parameter is required if you are registering a new + // scalable target, and it is optional if you are updating an existing one. + MaxCapacity *int64 `type:"integer"` + + // The minimum value for this scalable target to scale in to in response to + // scaling activities. This parameter is required if you are registering a new + // scalable target, and it is optional if you are updating an existing one. + MinCapacity *int64 `type:"integer"` + + // The unique identifier string for the resource to associate with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that allows Application Auto Scaling to modify your + // scalable target on your behalf. This parameter is required if you are registering + // a new scalable target, and it is optional if you are updating an existing + // one. + RoleARN *string `min:"1" type:"string"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For Amazon ECS services, the namespace value is ecs. For more information, + // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s RegisterScalableTargetInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterScalableTargetInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RegisterScalableTargetInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RegisterScalableTargetInput"} + if s.ResourceId == nil { + invalidParams.Add(request.NewErrParamRequired("ResourceId")) + } + if s.ResourceId != nil && len(*s.ResourceId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) + } + if s.RoleARN != nil && len(*s.RoleARN) < 1 { + invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) + } + if s.ScalableDimension == nil { + invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) + } + if s.ServiceNamespace == nil { + invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RegisterScalableTargetOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RegisterScalableTargetOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RegisterScalableTargetOutput) GoString() string { + return s.String() +} + +// An object representing a scalable target. +type ScalableTarget struct { + _ struct{} `type:"structure"` + + // The Unix timestamp for when the scalable target was created. 
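// A hedged sketch of registering a scalable target with this API, wrapped in a
// retry in the spirit of the commit above, which retries scalable target
// creation because a freshly created IAM role may not have propagated yet. The
// role ARN and resource ID are placeholder values, and the resource, aws and
// time imports from the provider code are assumed.
func registerScalableTargetWithRetry(conn *applicationautoscaling.ApplicationAutoScaling) error {
	return resource.Retry(5*time.Minute, func() *resource.RetryError {
		_, err := conn.RegisterScalableTarget(&applicationautoscaling.RegisterScalableTargetInput{
			ServiceNamespace:  aws.String("ecs"),
			ResourceId:        aws.String("service/default/sample-webapp"),
			ScalableDimension: aws.String("ecs:service:DesiredCount"),
			RoleARN:           aws.String("arn:aws:iam::123456789012:role/application-autoscaling-role"),
			MinCapacity:       aws.Int64(1),
			MaxCapacity:       aws.Int64(3),
		})
		if err != nil {
			// Treat the error as transient (for example, IAM eventual consistency) and retry.
			return resource.RetryableError(err)
		}
		return nil
	})
}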
+ CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The maximum value for this scalable target to scale out to in response to + // scaling activities. + MaxCapacity *int64 `type:"integer" required:"true"` + + // The minimum value for this scalable target to scale in to in response to + // scaling activities. + MinCapacity *int64 `type:"integer" required:"true"` + + // The unique identifier string for the resource associated with the scalable + // target. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The ARN of the IAM role that allows Application Auto Scaling to modify your + // scalable target on your behalf. + RoleARN *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scalable target. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scalable target is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` +} + +// String returns the string representation +func (s ScalableTarget) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalableTarget) GoString() string { + return s.String() +} + +// An object representing a scaling activity. +type ScalingActivity struct { + _ struct{} `type:"structure"` + + // The unique identifier string for the scaling activity. + ActivityId *string `type:"string" required:"true"` + + // A simple description of what caused the scaling activity to happen. + Cause *string `type:"string" required:"true"` + + // A simple description of what action the scaling activity intends to accomplish. + Description *string `type:"string" required:"true"` + + // The details about the scaling activity. + Details *string `type:"string"` + + // The Unix timestamp for when the scaling activity ended. + EndTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The unique identifier string for the resource associated with the scaling + // activity. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scaling activity. The scalable + // dimension contains the service namespace, resource type, and scaling property, + // such as ecs:service:DesiredCount for the desired task count of an Amazon + // ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling activity is associated + // with. For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. 
+ ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The Unix timestamp for when the scaling activity began. + StartTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // Indicates the status of the scaling activity. + StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"` + + // A simple message about the current status of the scaling activity. + StatusMessage *string `type:"string"` +} + +// String returns the string representation +func (s ScalingActivity) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingActivity) GoString() string { + return s.String() +} + +// An object representing a scaling policy. +type ScalingPolicy struct { + _ struct{} `type:"structure"` + + // The CloudWatch alarms that are associated with the scaling policy. + Alarms []*Alarm `type:"list"` + + // The Unix timestamp for when the scaling policy was created. + CreationTime *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` + + // The Amazon Resource Name (ARN) of the scaling policy. + PolicyARN *string `min:"1" type:"string" required:"true"` + + // The name of the scaling policy. + PolicyName *string `min:"1" type:"string" required:"true"` + + // The scaling policy type. + PolicyType *string `type:"string" required:"true" enum:"PolicyType"` + + // The unique identifier string for the resource associated with the scaling + // policy. For Amazon ECS services, this value is the resource type, followed + // by the cluster name and service name, such as service/default/sample-webapp. + ResourceId *string `min:"1" type:"string" required:"true"` + + // The scalable dimension associated with the scaling policy. The scalable dimension + // contains the service namespace, resource type, and scaling property, such + // as ecs:service:DesiredCount for the desired task count of an Amazon ECS service. + ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` + + // The namespace for the AWS service that the scaling policy is associated with. + // For more information, see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the Amazon Web Services General Reference. + ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` + + // The configuration for the step scaling policy. + StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` +} + +// String returns the string representation +func (s ScalingPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScalingPolicy) GoString() string { + return s.String() +} + +// An object representing a step adjustment for a StepScalingPolicyConfiguration. +// Describes an adjustment based on the difference between the value of the +// aggregated CloudWatch metric and the breach threshold that you've defined +// for the alarm. +// +// For the following examples, suppose that you have an alarm with a breach +// threshold of 50: +// +// If you want the adjustment to be triggered when the metric is greater +// than or equal to 50 and less than 60, specify a lower bound of 0 and an upper +// bound of 10. +// +// If you want the adjustment to be triggered when the metric is greater +// than 40 and less than or equal to 50, specify a lower bound of -10 and an +// upper bound of 0. 
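// A hedged sketch of a scale-out step set for the first example above (alarm
// breach threshold of 50), consistent with the rules that follow and assuming
// the aws setter helpers used in the provider code: the first step covers
// metric values in [50, 60) and the second, with a null upper bound, covers
// everything above that. The ScalingAdjustment values are illustrative only.
var exampleStepAdjustments = []*StepAdjustment{
	{
		// Metric >= 50 and < 60 (lower bound 0, upper bound 10): add 1 task.
		MetricIntervalLowerBound: aws.Float64(0),
		MetricIntervalUpperBound: aws.Float64(10),
		ScalingAdjustment:        aws.Int64(1),
	},
	{
		// Metric >= 60 (lower bound 10, null upper bound = positive infinity): add 2 tasks.
		MetricIntervalLowerBound: aws.Float64(10),
		ScalingAdjustment:        aws.Int64(2),
	},
}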
+// +// There are a few rules for the step adjustments for your step policy: +// +// The ranges of your step adjustments can't overlap or have a gap. +// +// At most one step adjustment can have a null lower bound. If one step adjustment +// has a negative lower bound, then there must be a step adjustment with a null +// lower bound. +// +// At most one step adjustment can have a null upper bound. If one step adjustment +// has a positive upper bound, then there must be a step adjustment with a null +// upper bound. +// +// The upper and lower bound can't be null in the same step adjustment. +type StepAdjustment struct { + _ struct{} `type:"structure"` + + // The lower bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the lower bound + // is inclusive (the metric must be greater than or equal to the threshold plus + // the lower bound). Otherwise, it is exclusive (the metric must be greater + // than the threshold plus the lower bound). A null value indicates negative + // infinity. + MetricIntervalLowerBound *float64 `type:"double"` + + // The upper bound for the difference between the alarm threshold and the CloudWatch + // metric. If the metric value is above the breach threshold, the upper bound + // is exclusive (the metric must be less than the threshold plus the upper bound). + // Otherwise, it is inclusive (the metric must be less than or equal to the + // threshold plus the upper bound). A null value indicates positive infinity. + // + // The upper bound must be greater than the lower bound. + MetricIntervalUpperBound *float64 `type:"double"` + + // The amount by which to scale, based on the specified adjustment type. A positive + // value adds to the current scalable dimension while a negative number removes + // from the current scalable dimension. + ScalingAdjustment *int64 `type:"integer" required:"true"` +} + +// String returns the string representation +func (s StepAdjustment) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepAdjustment) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StepAdjustment) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepAdjustment"} + if s.ScalingAdjustment == nil { + invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// An object representing a step scaling policy configuration. +type StepScalingPolicyConfiguration struct { + _ struct{} `type:"structure"` + + // The adjustment type, which specifies how the ScalingAdjustment parameter + // in a StepAdjustment is interpreted. + AdjustmentType *string `type:"string" enum:"AdjustmentType"` + + // The amount of time, in seconds, after a scaling activity completes where + // previous trigger-related scaling activities can influence future scaling + // events. + // + // For scale out policies, while Cooldown is in effect, the capacity that has + // been added by the previous scale out event that initiated the Cooldown is + // calculated as part of the desired capacity for the next scale out. The intention + // is to continuously (but not excessively) scale out. 
For example, an alarm + // triggers a step scaling policy to scale out an Amazon ECS service by 2 tasks, + // the scaling activity completes successfully, and a Cooldown period of 5 minutes + // starts. During the Cooldown period, if the alarm triggers the same policy + // again but at a more aggressive step adjustment to scale out the service by + // 3 tasks, the 2 tasks that were added in the previous scale out event are + // considered part of that capacity and only 1 additional task is added to the + // desired count. + // + // For scale in policies, the Cooldown period is used to block subsequent scale + // in requests until it has expired. The intention is to scale in conservatively + // to protect your application's availability. However, if another alarm triggers + // a scale out policy during the Cooldown period after a scale-in, Application + // Auto Scaling scales out your scalable target immediately. + Cooldown *int64 `type:"integer"` + + // The aggregation type for the CloudWatch metrics. Valid values are Minimum, + // Maximum, and Average. + MetricAggregationType *string `type:"string" enum:"MetricAggregationType"` + + // The minimum number to adjust your scalable dimension as a result of a scaling + // activity. If the adjustment type is PercentChangeInCapacity, the scaling + // policy changes the scalable dimension of the scalable target by this amount. + MinAdjustmentMagnitude *int64 `type:"integer"` + + // A set of adjustments that enable you to scale based on the size of the alarm + // breach. + StepAdjustments []*StepAdjustment `type:"list"` +} + +// String returns the string representation +func (s StepScalingPolicyConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StepScalingPolicyConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *StepScalingPolicyConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StepScalingPolicyConfiguration"} + if s.StepAdjustments != nil { + for i, v := range s.StepAdjustments { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StepAdjustments", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +const ( + // @enum AdjustmentType + AdjustmentTypeChangeInCapacity = "ChangeInCapacity" + // @enum AdjustmentType + AdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" + // @enum AdjustmentType + AdjustmentTypeExactCapacity = "ExactCapacity" +) + +const ( + // @enum MetricAggregationType + MetricAggregationTypeAverage = "Average" + // @enum MetricAggregationType + MetricAggregationTypeMinimum = "Minimum" + // @enum MetricAggregationType + MetricAggregationTypeMaximum = "Maximum" +) + +const ( + // @enum PolicyType + PolicyTypeStepScaling = "StepScaling" +) + +const ( + // @enum ScalableDimension + ScalableDimensionEcsServiceDesiredCount = "ecs:service:DesiredCount" +) + +const ( + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodePending = "Pending" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeInProgress = "InProgress" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeSuccessful = "Successful" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeOverridden = "Overridden" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeUnfulfilled = "Unfulfilled" + // @enum ScalingActivityStatusCode + ScalingActivityStatusCodeFailed = "Failed" +) + +const ( + // @enum ServiceNamespace + ServiceNamespaceEcs = "ecs" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go new file mode 100644 index 000000000..d6e797ff2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -0,0 +1,112 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +package applicationautoscaling + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" +) + +// Application Auto Scaling is a general purpose Auto Scaling service for supported +// elastic AWS resources. With Application Auto Scaling, you can automatically +// scale your AWS resources, with an experience similar to that of Auto Scaling. +// +// At this time, Application Auto Scaling only supports scaling Amazon ECS +// services. +// +// For example, you can use Application Auto Scaling to accomplish the following +// tasks: +// +// Define scaling policies for automatically adjusting your application’s +// resources +// +// Scale your resources in response to CloudWatch alarms +// +// View history of your scaling events +// +// Application Auto Scaling is available in the following regions: +// +// us-east-1 +// +// us-west-2 +// +// eu-west-1 +//The service client's operations are safe to be used concurrently. +// It is not safe to mutate any of the client's properties though. 
+type ApplicationAutoScaling struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// A ServiceName is the name of the service the client will make API calls to. +const ServiceName = "autoscaling" + +// New creates a new instance of the ApplicationAutoScaling client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a ApplicationAutoScaling client from just a session. +// svc := applicationautoscaling.New(mySession) +// +// // Create a ApplicationAutoScaling client with additional configuration +// svc := applicationautoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling { + c := p.ClientConfig(ServiceName, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) +} + +// newClient creates, initializes and returns a new service client instance. +func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *ApplicationAutoScaling { + svc := &ApplicationAutoScaling{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: "application-autoscaling", + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2016-02-06", + JSONVersion: "1.1", + TargetPrefix: "AnyScaleFrontendService", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a ApplicationAutoScaling operation and runs any +// custom request initialization. 
+func (c *ApplicationAutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/vendor.json b/vendor/vendor.json index d50287eaa..db04e37e7 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -532,6 +532,12 @@ "version": "v1.2.7", "versionExact": "v1.2.7" }, + { + "checksumSHA1": "Td30Frd+lrCLlkMAirUTbjBXq5Q=", + "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", + "revision": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6", + "revisionTime": "2016-07-08T00:08:20Z" + }, { "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", "comment": "v1.1.23", From 76aea014cc55956722b0f9ffc54f129d62ebdb64 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 26 Jul 2016 10:42:21 +0100 Subject: [PATCH 0409/1238] provider/aws: Making some slight changes to the tests in resources --- ...resource_aws_appautoscaling_policy_test.go | 3 +- ...resource_aws_appautoscaling_target_test.go | 34 +++++++------------ 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go index 62d6d777d..0b5b0888d 100644 --- a/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go +++ b/builtin/providers/aws/resource_aws_appautoscaling_policy_test.go @@ -11,11 +11,10 @@ import ( "github.com/hashicorp/terraform/terraform" ) -func TestAccAWSAppautoscalingPolicy_basic(t *testing.T) { +func TestAccAWSAppautoScalingPolicy_basic(t *testing.T) { var policy applicationautoscaling.ScalingPolicy randClusterName := fmt.Sprintf("cluster%s", acctest.RandString(10)) - // randResourceId := fmt.Sprintf("service/%s/%s", randClusterName, acctest.RandString(10)) randPolicyName := fmt.Sprintf("terraform-test-foobar-%s", acctest.RandString(5)) resource.Test(t, resource.TestCase{ diff --git a/builtin/providers/aws/resource_aws_appautoscaling_target_test.go b/builtin/providers/aws/resource_aws_appautoscaling_target_test.go index 185751c63..e3796e34e 100644 --- a/builtin/providers/aws/resource_aws_appautoscaling_target_test.go +++ b/builtin/providers/aws/resource_aws_appautoscaling_target_test.go @@ -16,7 +16,6 @@ func TestAccAWSAppautoScalingTarget_basic(t *testing.T) { var target applicationautoscaling.ScalableTarget randClusterName := fmt.Sprintf("cluster-%s", acctest.RandString(10)) - randResourceId := fmt.Sprintf("service/%s/%s", randClusterName, acctest.RandString(10)) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -25,12 +24,10 @@ func TestAccAWSAppautoScalingTarget_basic(t *testing.T) { CheckDestroy: testAccCheckAWSAppautoscalingTargetDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccAWSAppautoscalingTargetConfig(randClusterName, randResourceId), + Config: testAccAWSAppautoscalingTargetConfig(randClusterName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAppautoscalingTargetExists("aws_appautoscaling_target.bar", &target), - testAccCheckAWSAppautoscalingTargetAttributes(&target, randResourceId), resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "service_namespace", "ecs"), - resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "resource_id", fmt.Sprintf("service/%s/foobar", randClusterName)), resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "scalable_dimension", 
"ecs:service:DesiredCount"), resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "min_capacity", "1"), resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "max_capacity", "3"), @@ -38,11 +35,11 @@ func TestAccAWSAppautoScalingTarget_basic(t *testing.T) { }, resource.TestStep{ - Config: testAccAWSAppautoscalingTargetConfigUpdate(randClusterName, randResourceId), + Config: testAccAWSAppautoscalingTargetConfigUpdate(randClusterName), Check: resource.ComposeTestCheckFunc( testAccCheckAWSAppautoscalingTargetExists("aws_appautoscaling_target.bar", &target), - resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "min_capacity", "3"), - resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "max_capacity", "6"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "min_capacity", "2"), + resource.TestCheckResourceAttr("aws_appautoscaling_target.bar", "max_capacity", "8"), ), }, }, @@ -60,7 +57,8 @@ func testAccCheckAWSAppautoscalingTargetDestroy(s *terraform.State) error { // Try to find the target describeTargets, err := conn.DescribeScalableTargets( &applicationautoscaling.DescribeScalableTargetsInput{ - ResourceIds: []*string{aws.String(rs.Primary.ID)}, + ResourceIds: []*string{aws.String(rs.Primary.ID)}, + ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), }, ) @@ -99,7 +97,8 @@ func testAccCheckAWSAppautoscalingTargetExists(n string, target *applicationauto describeTargets, err := conn.DescribeScalableTargets( &applicationautoscaling.DescribeScalableTargetsInput{ - ResourceIds: []*string{aws.String(rs.Primary.ID)}, + ResourceIds: []*string{aws.String(rs.Primary.ID)}, + ServiceNamespace: aws.String(rs.Primary.Attributes["service_namespace"]), }, ) @@ -107,24 +106,18 @@ func testAccCheckAWSAppautoscalingTargetExists(n string, target *applicationauto return err } - if len(describeTargets.ScalableTargets) != 1 || - *describeTargets.ScalableTargets[0].ResourceId != rs.Primary.ID { + if len(describeTargets.ScalableTargets) != 1 || *describeTargets.ScalableTargets[0].ResourceId != rs.Primary.ID { return fmt.Errorf("Application AutoScaling ResourceId not found") } - *target = *describeTargets.ScalableTargets[0] + target = describeTargets.ScalableTargets[0] return nil } } -func testAccCheckAWSAppautoscalingTargetAttributes(target *applicationautoscaling.ScalableTarget, resourceId string) resource.TestCheckFunc { - return nil -} - func testAccAWSAppautoscalingTargetConfig( - randClusterName string, - randResourceId string) string { + randClusterName string) string { return fmt.Sprintf(` resource "aws_iam_role" "autoscale_role" { name = "autoscalerole%s" @@ -210,14 +203,13 @@ resource "aws_appautoscaling_target" "bar" { scalable_dimension = "ecs:service:DesiredCount" role_arn = "${aws_iam_role.autoscale_role.arn}" min_capacity = 1 - max_capacity = 4 + max_capacity = 3 } `, randClusterName, randClusterName, randClusterName) } func testAccAWSAppautoscalingTargetConfigUpdate( - randClusterName, - randResourceId string) string { + randClusterName string) string { return fmt.Sprintf(` resource "aws_iam_role" "autoscale_role" { name = "autoscalerole%s" From 0c9669b6148d3c3f31ec52d61135c900dc9bf05e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 26 Jul 2016 10:46:17 +0100 Subject: [PATCH 0410/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 11df71aee..00f2bd953 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,8 @@ FEATURES: * **New 
Resource:** `aws_opsworks_user_profile` [GH-6304] * **New Resource:** `aws_opsworks_permission` [GH-6304] * **New Resource:** `aws_ami_launch_permission` [GH-7365] + * **New Resource:** `aws_appautoscaling_policy` [GH-7663] + * **New Resource:** `aws_appautoscaling_target` [GH-7663] * **New Resource:** `openstack_blockstorage_volume_v2` [GH-6693] * **New Resource:** `openstack_lb_loadbalancer_v2` [GH-7012] * **New Resource:** `openstack_lb_listener_v2` [GH-7012] From 8f48a4106f308390d9f7863b56b2f430975ffadf Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 26 Jul 2016 10:50:17 +0100 Subject: [PATCH 0411/1238] docs/aws: Add the App Autoscaling Resources to the nav bar in a section of their own --- website/source/layouts/aws.erb | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 8ef7ebbf0..c4765aafe 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -70,6 +70,20 @@ + > + App Autoscaling Resources + + + > CloudFormation Resources From da437303970bc3821e4df480f263bd6dd38e5058 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 26 Jul 2016 19:20:42 +0100 Subject: [PATCH 0423/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e41713023..098561e07 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -87,6 +87,8 @@ FEATURES: * **New Resource:** `consul_catalog_entry` [GH-7508] * **New Resource:** `consul_node` [GH-7508] * **New Resource:** `consul_service` [GH-7508] + * **New Resource:** `mysql_grant` [GH-7656] + * **New Resource:** `mysql_user` [GH-7656] * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] * core: The `lookup` interpolation function can now have a default fall-back value specified [GH-6884] * core: The `terraform plan` command no longer persists state. 
[GH-6811] From 6a42717f55c17e5d62506f8c8e545f580c3d183f Mon Sep 17 00:00:00 2001 From: clint shryock Date: Tue, 26 Jul 2016 14:41:54 -0500 Subject: [PATCH 0424/1238] update sqs test to fail on reverting to defaults --- .../aws/resource_aws_sqs_queue_test.go | 23 ++++++++++++------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/builtin/providers/aws/resource_aws_sqs_queue_test.go b/builtin/providers/aws/resource_aws_sqs_queue_test.go index 5fa2b5b72..a5e06ae14 100644 --- a/builtin/providers/aws/resource_aws_sqs_queue_test.go +++ b/builtin/providers/aws/resource_aws_sqs_queue_test.go @@ -22,13 +22,19 @@ func TestAccAWSSQSQueue_basic(t *testing.T) { resource.TestStep{ Config: testAccAWSSQSConfigWithDefaults(queueName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue-with-defaults"), + testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue"), ), }, resource.TestStep{ - Config: testAccAWSSQSConfigWithOverrides, + Config: testAccAWSSQSConfigWithOverrides(queueName), Check: resource.ComposeTestCheckFunc( - testAccCheckAWSSQSExistsWithOverrides("aws_sqs_queue.queue-with-overrides"), + testAccCheckAWSSQSExistsWithOverrides("aws_sqs_queue.queue"), + ), + }, + resource.TestStep{ + Config: testAccAWSSQSConfigWithDefaults(queueName), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSQSExistsWithDefaults("aws_sqs_queue.queue"), ), }, }, @@ -197,22 +203,23 @@ func testAccCheckAWSSQSExistsWithOverrides(n string) resource.TestCheckFunc { func testAccAWSSQSConfigWithDefaults(r string) string { return fmt.Sprintf(` -resource "aws_sqs_queue" "queue-with-defaults" { +resource "aws_sqs_queue" "queue" { name = "%s" } `, r) } -const testAccAWSSQSConfigWithOverrides = ` -resource "aws_sqs_queue" "queue-with-overrides" { - name = "test-sqs-queue-with-overrides" +func testAccAWSSQSConfigWithOverrides(r string) string { + return fmt.Sprintf(` +resource "aws_sqs_queue" "queue" { + name = "%s" delay_seconds = 90 max_message_size = 2048 message_retention_seconds = 86400 receive_wait_time_seconds = 10 visibility_timeout_seconds = 60 +}`, r) } -` func testAccAWSSQSConfigWithRedrive(name string) string { return fmt.Sprintf(` From 348f6bad50a67e9411e1034d9e96232d7349f571 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Tue, 26 Jul 2016 14:53:45 -0500 Subject: [PATCH 0425/1238] provider/aws: Apply defaults to SQS Queues --- builtin/providers/aws/resource_aws_sqs_queue.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_sqs_queue.go b/builtin/providers/aws/resource_aws_sqs_queue.go index 02eab6d58..cb76fbb45 100644 --- a/builtin/providers/aws/resource_aws_sqs_queue.go +++ b/builtin/providers/aws/resource_aws_sqs_queue.go @@ -50,22 +50,22 @@ func resourceAwsSqsQueue() *schema.Resource { "delay_seconds": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 0, }, "max_message_size": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 262144, }, "message_retention_seconds": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 345600, }, "receive_wait_time_seconds": &schema.Schema{ Type: schema.TypeInt, Optional: true, - Computed: true, + Default: 0, }, "visibility_timeout_seconds": &schema.Schema{ Type: schema.TypeInt, From ff9ad3cc401a7747f05976732e12aa7a636c0646 Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 26 Jul 2016 15:04:03 -0500 Subject: [PATCH 0426/1238] provider/aws: Fix issue updating 
ElasticBeanstalk Environment templates (#7811) * provider/aws: Fix issue removing templates from ElasticBeanstalk * regression test --- ...ource_aws_elastic_beanstalk_environment.go | 8 +- ..._aws_elastic_beanstalk_environment_test.go | 94 +++++++++++++++++++ 2 files changed, 100 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index b94494778..cf1748733 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -311,7 +311,9 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i if d.HasChange("solution_stack_name") { hasChange = true - updateOpts.SolutionStackName = aws.String(d.Get("solution_stack_name").(string)) + if v, ok := d.GetOk("solution_stack_name"); ok { + updateOpts.SolutionStackName = aws.String(v.(string)) + } } if d.HasChange("setting") { @@ -332,7 +334,9 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i if d.HasChange("template_name") { hasChange = true - updateOpts.TemplateName = aws.String(d.Get("template_name").(string)) + if v, ok := d.GetOk("template_name"); ok { + updateOpts.TemplateName = aws.String(v.(string)) + } } if hasChange { diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go index 4d9481791..ab4744351 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go @@ -178,6 +178,40 @@ func TestAccAWSBeanstalkEnv_vpc(t *testing.T) { }) } +func TestAccAWSBeanstalkEnv_template_change(t *testing.T) { + var app elasticbeanstalk.EnvironmentDescription + + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { + testAccPreCheck(t) + }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBeanstalkEnvDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBeanstalkEnv_TemplateChange_stack(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), + ), + }, + resource.TestStep{ + Config: testAccBeanstalkEnv_TemplateChange_temp(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), + ), + }, + resource.TestStep{ + Config: testAccBeanstalkEnv_TemplateChange_stack(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.environment", &app), + ), + }, + }, + }) +} + func testAccCheckBeanstalkEnvDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn @@ -571,3 +605,63 @@ resource "aws_elastic_beanstalk_environment" "default" { } `, name) } + +func testAccBeanstalkEnv_TemplateChange_stack(r int) string { + return fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} + +resource "aws_elastic_beanstalk_application" "app" { + name = "beanstalk-app-%d" + description = "" +} + +resource "aws_elastic_beanstalk_environment" "environment" { + name = "beanstalk-env-%d" + application = "${aws_elastic_beanstalk_application.app.name}" + + # Go 1.4 + + solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.0 running Go 1.4" +} + +resource 
"aws_elastic_beanstalk_configuration_template" "template" { + name = "beanstalk-config-%d" + application = "${aws_elastic_beanstalk_application.app.name}" + + # Go 1.5 + solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.3 running Go 1.5" +} +`, r, r, r) +} + +func testAccBeanstalkEnv_TemplateChange_temp(r int) string { + return fmt.Sprintf(` +provider "aws" { + region = "us-east-1" +} + +resource "aws_elastic_beanstalk_application" "app" { + name = "beanstalk-app-%d" + description = "" +} + +resource "aws_elastic_beanstalk_environment" "environment" { + name = "beanstalk-env-%d" + application = "${aws_elastic_beanstalk_application.app.name}" + + # Go 1.4 + + template_name = "${aws_elastic_beanstalk_configuration_template.template.name}" +} + +resource "aws_elastic_beanstalk_configuration_template" "template" { + name = "beanstalk-config-%d" + application = "${aws_elastic_beanstalk_application.app.name}" + + # Go 1.5 + solution_stack_name = "64bit Amazon Linux 2016.03 v2.1.3 running Go 1.5" +} +`, r, r, r) +} From 7d580625daeb19aa317447f889be02aa831b494c Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 26 Jul 2016 15:04:41 -0500 Subject: [PATCH 0427/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 098561e07..826b6ccf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -238,6 +238,7 @@ BUG FIXES: * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` [GH-7698] * provider/aws: Ignore IOPS on non io1 AWS root_block_device [GH-7783] * provider/aws: Ignore missing ENI attachment when trying to detach ENI [GH-7185] + * provider/aws: Fix issue updating ElasticBeanstalk Environment templates [GH-7811] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 681d94ae207b0ff780642d68e98a518eb27eefce Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 20 Jul 2016 20:38:26 -0500 Subject: [PATCH 0428/1238] core: Allow lists and maps as variable overrides Terraform 0.7 introduces lists and maps as first-class values for variables, in addition to string values which were previously available. However, there was previously no way to override the default value of a list or map, and the functionality for overriding specific map keys was broken. Using the environment variable method for setting variable values, there was previously no way to give a variable a value of a list or map. These now support HCL for individual values - specifying: TF_VAR_test='["Hello", "World"]' will set the variable `test` to a two-element list containing "Hello" and "World". Specifying TF_VAR_test_map='{"Hello = "World", "Foo" = "bar"}' will set the variable `test_map` to a two-element map with keys "Hello" and "Foo", and values "World" and "bar" respectively. The same logic is applied to `-var` flags, and the file parsed by `-var-files` ("autoVariables"). Note that care must be taken to not run into shell expansion for `-var-` flags and environment variables. We also merge map keys where appropriate. The override syntax has changed (to be noted in CHANGELOG as a breaking change), so several tests needed their syntax updating from the old `amis.us-east-1 = "newValue"` style to `amis = "{ "us-east-1" = "newValue"}"` style as defined in TF-002. 
In order to continue supporting the `-var "foo=bar"` type of variable flag (which is not valid HCL), a special case error is checked after HCL parsing fails, and the old code path runs instead. --- command/flag_kv.go | 89 +++++++++- command/flag_kv_test.go | 101 ++++++++++-- command/init.go | 2 +- command/meta.go | 6 +- command/remote_config.go | 2 +- terraform/context.go | 156 ++++++++++++++++-- terraform/context_apply_test.go | 29 +++- terraform/context_input_test.go | 8 +- terraform/semantics.go | 38 ++++- terraform/terraform_test.go | 4 + .../test-fixtures/apply-vars-env/main.tf | 17 +- terraform/test-fixtures/apply-vars/main.tf | 10 ++ 12 files changed, 409 insertions(+), 53 deletions(-) diff --git a/command/flag_kv.go b/command/flag_kv.go index 492cc66be..b39aa1cd8 100644 --- a/command/flag_kv.go +++ b/command/flag_kv.go @@ -3,21 +3,46 @@ package command import ( "fmt" "io/ioutil" + "regexp" "strings" "github.com/hashicorp/hcl" "github.com/mitchellh/go-homedir" ) -// FlagKV is a flag.Value implementation for parsing user variables -// from the command-line in the format of '-var key=value'. -type FlagKV map[string]string +// FlagTypedKVis a flag.Value implementation for parsing user variables +// from the command-line in the format of '-var key=value', where value is +// a type intended for use as a Terraform variable +type FlagTypedKV map[string]interface{} -func (v *FlagKV) String() string { +func (v *FlagTypedKV) String() string { return "" } -func (v *FlagKV) Set(raw string) error { +func (v *FlagTypedKV) Set(raw string) error { + key, value, err := parseVarFlagAsHCL(raw) + if err != nil { + return err + } + + if *v == nil { + *v = make(map[string]interface{}) + } + + (*v)[key] = value + return nil +} + +// FlagStringKV is a flag.Value implementation for parsing user variables +// from the command-line in the format of '-var key=value', where value is +// only ever a primitive. +type FlagStringKV map[string]string + +func (v *FlagStringKV) String() string { + return "" +} + +func (v *FlagStringKV) Set(raw string) error { idx := strings.Index(raw, "=") if idx == -1 { return fmt.Errorf("No '=' value in arg: %s", raw) @@ -34,7 +59,7 @@ func (v *FlagKV) Set(raw string) error { // FlagKVFile is a flag.Value implementation for parsing user variables // from the command line in the form of files. i.e. '-var-file=foo' -type FlagKVFile map[string]string +type FlagKVFile map[string]interface{} func (v *FlagKVFile) String() string { return "" @@ -47,7 +72,7 @@ func (v *FlagKVFile) Set(raw string) error { } if *v == nil { - *v = make(map[string]string) + *v = make(map[string]interface{}) } for key, value := range vs { @@ -57,7 +82,7 @@ func (v *FlagKVFile) Set(raw string) error { return nil } -func loadKVFile(rawPath string) (map[string]string, error) { +func loadKVFile(rawPath string) (map[string]interface{}, error) { path, err := homedir.Expand(rawPath) if err != nil { return nil, fmt.Errorf( @@ -78,7 +103,7 @@ func loadKVFile(rawPath string) (map[string]string, error) { "Error parsing %s: %s", path, err) } - var result map[string]string + var result map[string]interface{} if err := hcl.DecodeObject(&result, obj); err != nil { return nil, fmt.Errorf( "Error decoding Terraform vars file: %s\n\n"+ @@ -103,3 +128,49 @@ func (v *FlagStringSlice) Set(raw string) error { return nil } + +// parseVarFlagAsHCL parses the value of a single variable as would have been specified +// on the command line via -var or in an environment variable named TF_VAR_x, where x is +// the name of the variable. 
In order to get around the restriction of HCL requiring a +// top level object, we prepend a sentinel key, decode the user-specified value as its +// value and pull the value back out of the resulting map. +func parseVarFlagAsHCL(input string) (string, interface{}, error) { + idx := strings.Index(input, "=") + if idx == -1 { + return "", nil, fmt.Errorf("No '=' value in variable: %s", input) + } + probablyName := input[0:idx] + + parsed, err := hcl.Parse(input) + if err != nil { + // This covers flags of the form `foo=bar` which is not valid HCL + // At this point, probablyName is actually the name, and the remainder + // of the expression after the equals sign is the value. + if regexp.MustCompile(`Unknown token: \d+:\d+ IDENT`).Match([]byte(err.Error())) { + value := input[idx+1:] + return probablyName, value, nil + } + return "", nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", probablyName, input, err) + } + + var decoded map[string]interface{} + if hcl.DecodeObject(&decoded, parsed); err != nil { + return "", nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", probablyName, input, err) + } + + // Cover cases such as key= + if len(decoded) == 0 { + return probablyName, "", nil + } + + if len(decoded) > 1 { + return "", nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", probablyName, input) + } + + for k, v := range decoded { + return k, v, nil + } + + // Should be unreachable + return "", nil, fmt.Errorf("No value for variable: %s", input) +} diff --git a/command/flag_kv_test.go b/command/flag_kv_test.go index 07b70d022..000915e61 100644 --- a/command/flag_kv_test.go +++ b/command/flag_kv_test.go @@ -2,16 +2,17 @@ package command import ( "flag" + "github.com/davecgh/go-spew/spew" "io/ioutil" "reflect" "testing" ) -func TestFlagKV_impl(t *testing.T) { - var _ flag.Value = new(FlagKV) +func TestFlagStringKV_impl(t *testing.T) { + var _ flag.Value = new(FlagStringKV) } -func TestFlagKV(t *testing.T) { +func TestFlagStringKV(t *testing.T) { cases := []struct { Input string Output map[string]string @@ -49,10 +50,10 @@ func TestFlagKV(t *testing.T) { } for _, tc := range cases { - f := new(FlagKV) + f := new(FlagStringKV) err := f.Set(tc.Input) if err != nil != tc.Error { - t.Fatalf("bad error. Input: %#v", tc.Input) + t.Fatalf("bad error. 
Input: %#v\n\nError: %s", tc.Input, err) } actual := map[string]string(*f) @@ -62,6 +63,86 @@ func TestFlagKV(t *testing.T) { } } +func TestFlagTypedKV_impl(t *testing.T) { + var _ flag.Value = new(FlagTypedKV) +} + +func TestFlagTypedKV(t *testing.T) { + cases := []struct { + Input string + Output map[string]interface{} + Error bool + }{ + { + "key=value", + map[string]interface{}{"key": "value"}, + false, + }, + + { + "key=", + map[string]interface{}{"key": ""}, + false, + }, + + { + "key=foo=bar", + map[string]interface{}{"key": "foo=bar"}, + false, + }, + + { + "map.key=foo", + map[string]interface{}{"map.key": "foo"}, + false, + }, + + { + "key", + nil, + true, + }, + + { + `key=["hello", "world"]`, + map[string]interface{}{"key": []interface{}{"hello", "world"}}, + false, + }, + + { + `key={"hello" = "world", "foo" = "bar"}`, + map[string]interface{}{ + "key": []map[string]interface{}{ + map[string]interface{}{ + "hello": "world", + "foo": "bar", + }, + }, + }, + false, + }, + + { + `key={"hello" = "world", "foo" = "bar"}\nkey2="invalid"`, + nil, + true, + }, + } + + for _, tc := range cases { + f := new(FlagTypedKV) + err := f.Set(tc.Input) + if err != nil != tc.Error { + t.Fatalf("bad error. Input: %#v\n\nError: %s", tc.Input, err) + } + + actual := map[string]interface{}(*f) + if !reflect.DeepEqual(actual, tc.Output) { + t.Fatalf("bad:\nexpected: %s\n\n got: %s\n", spew.Sdump(tc.Output), spew.Sdump(actual)) + } + } +} + func TestFlagKVFile_impl(t *testing.T) { var _ flag.Value = new(FlagKVFile) } @@ -76,24 +157,24 @@ foo = "bar" cases := []struct { Input string - Output map[string]string + Output map[string]interface{} Error bool }{ { inputLibucl, - map[string]string{"foo": "bar"}, + map[string]interface{}{"foo": "bar"}, false, }, { inputJson, - map[string]string{"foo": "bar"}, + map[string]interface{}{"foo": "bar"}, false, }, { `map.key = "foo"`, - map[string]string{"map.key": "foo"}, + map[string]interface{}{"map.key": "foo"}, false, }, } @@ -111,7 +192,7 @@ foo = "bar" t.Fatalf("bad error. 
Input: %#v, err: %s", tc.Input, err) } - actual := map[string]string(*f) + actual := map[string]interface{}(*f) if !reflect.DeepEqual(actual, tc.Output) { t.Fatalf("bad: %#v", actual) } diff --git a/command/init.go b/command/init.go index bcc339bee..c4026d48d 100644 --- a/command/init.go +++ b/command/init.go @@ -25,7 +25,7 @@ func (c *InitCommand) Run(args []string) int { remoteConfig := make(map[string]string) cmdFlags := flag.NewFlagSet("init", flag.ContinueOnError) cmdFlags.StringVar(&remoteBackend, "backend", "", "") - cmdFlags.Var((*FlagKV)(&remoteConfig), "backend-config", "config") + cmdFlags.Var((*FlagStringKV)(&remoteConfig), "backend-config", "config") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { return 1 diff --git a/command/meta.go b/command/meta.go index 69f3e3e5c..5adf1b667 100644 --- a/command/meta.go +++ b/command/meta.go @@ -36,9 +36,9 @@ type Meta struct { // Variables for the context (private) autoKey string - autoVariables map[string]string + autoVariables map[string]interface{} input bool - variables map[string]string + variables map[string]interface{} // Targets for this context (private) targets []string @@ -315,7 +315,7 @@ func (m *Meta) contextOpts() *terraform.ContextOpts { func (m *Meta) flagSet(n string) *flag.FlagSet { f := flag.NewFlagSet(n, flag.ContinueOnError) f.BoolVar(&m.input, "input", true, "input") - f.Var((*FlagKV)(&m.variables), "var", "variables") + f.Var((*FlagTypedKV)(&m.variables), "var", "variables") f.Var((*FlagKVFile)(&m.variables), "var-file", "variable file") f.Var((*FlagStringSlice)(&m.targets), "target", "resource to target") diff --git a/command/remote_config.go b/command/remote_config.go index 6f53d0dbb..7f4a00b81 100644 --- a/command/remote_config.go +++ b/command/remote_config.go @@ -38,7 +38,7 @@ func (c *RemoteConfigCommand) Run(args []string) int { cmdFlags.StringVar(&c.conf.statePath, "state", DefaultStateFilename, "path") cmdFlags.StringVar(&c.conf.backupPath, "backup", "", "path") cmdFlags.StringVar(&c.remoteConf.Type, "backend", "atlas", "") - cmdFlags.Var((*FlagKV)(&config), "backend-config", "config") + cmdFlags.Var((*FlagStringKV)(&config), "backend-config", "config") cmdFlags.Usage = func() { c.Ui.Error(c.Help()) } if err := cmdFlags.Parse(args); err != nil { c.Ui.Error(fmt.Sprintf("\nError parsing CLI flags: %s", err)) diff --git a/terraform/context.go b/terraform/context.go index f653aba5e..86d7e58ce 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" ) @@ -119,24 +120,109 @@ func NewContext(opts *ContextOpts) (*Context, error) { par = 10 } - // Setup the variables. We first take the variables given to us. - // We then merge in the variables set in the environment. + // Set up the variables in the following sequence: + // 0 - Take default values from the configuration + // 1 - Take values from TF_VAR_x environment variables + // 2 - Take values specified in -var flags, overriding values + // set by environment variables if necessary. This includes + // values taken from -var-file in addition. 
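+	// Illustrative walk-through (example values, not part of this change):
+	// with `variable "ami" { default = "foo" }` in configuration,
+	// TF_VAR_ami=bar exported in the environment, and -var 'ami=baz' on the
+	// command line, step 0 seeds "foo", step 1 overrides it with "bar", and
+	// step 2 wins with "baz".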
variables := make(map[string]interface{}) - for _, v := range os.Environ() { - if !strings.HasPrefix(v, VarEnvPrefix) { - continue + + if opts.Module != nil { + for _, v := range opts.Module.Config().Variables { + if v.Default != nil { + if v.Type() == config.VariableTypeString { + // v.Default has already been parsed as HCL so there may be + // some stray ints in there + switch typedDefault := v.Default.(type) { + case string: + if typedDefault == "" { + continue + } + variables[v.Name] = typedDefault + case int, int64: + variables[v.Name] = fmt.Sprintf("%d", typedDefault) + case float32, float64: + variables[v.Name] = fmt.Sprintf("%f", typedDefault) + case bool: + variables[v.Name] = fmt.Sprintf("%t", typedDefault) + } + } else { + variables[v.Name] = v.Default + } + } } - // Strip off the prefix and get the value after the first "=" - idx := strings.Index(v, "=") - k := v[len(VarEnvPrefix):idx] - v = v[idx+1:] + for _, v := range os.Environ() { + if !strings.HasPrefix(v, VarEnvPrefix) { + continue + } - // Override the command-line set variable - variables[k] = v - } - for k, v := range opts.Variables { - variables[k] = v + // Strip off the prefix and get the value after the first "=" + idx := strings.Index(v, "=") + k := v[len(VarEnvPrefix):idx] + v = v[idx+1:] + + // Override the configuration-default values. Note that *not* finding the variable + // in configuration is OK, as we don't want to preclude people from having multiple + // sets of TF_VAR_whatever in their environment even if it is a little weird. + for _, schema := range opts.Module.Config().Variables { + if schema.Name == k { + varType := schema.Type() + varVal, err := parseVariableAsHCL(k, v, varType) + if err != nil { + return nil, err + } + switch varType { + case config.VariableTypeMap: + if existing, hasMap := variables[k]; !hasMap { + variables[k] = varVal + } else { + if existingMap, ok := existing.(map[string]interface{}); !ok { + panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + } else { + if newMap, ok := varVal.(map[string]interface{}); ok { + for newKey, newVal := range newMap { + existingMap[newKey] = newVal + } + } else { + panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + } + } + } + default: + variables[k] = varVal + } + } + } + } + + for k, v := range opts.Variables { + for _, schema := range opts.Module.Config().Variables { + if schema.Name == k { + switch schema.Type() { + case config.VariableTypeMap: + if existing, hasMap := variables[k]; !hasMap { + variables[k] = v + } else { + if existingMap, ok := existing.(map[string]interface{}); !ok { + panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + } else { + if newMap, ok := v.([]map[string]interface{}); ok { + for newKey, newVal := range newMap[0] { + existingMap[newKey] = newVal + } + } else { + panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) + } + } + } + default: + variables[k] = v + } + } + } + } } return &Context{ @@ -548,3 +634,45 @@ func (c *Context) walk( walker := &ContextGraphWalker{Context: c, Operation: operation} return walker, graph.Walk(walker) } + +// parseVariableAsHCL parses the value of a single variable as would have been specified +// on the command line via -var or in an environment variable named TF_VAR_x, where x is +// the name of the variable. 
In order to get around the restriction of HCL requiring a +// top level object, we prepend a sentinel key, decode the user-specified value as its +// value and pull the value back out of the resulting map. +func parseVariableAsHCL(name string, input interface{}, targetType config.VariableType) (interface{}, error) { + if targetType == config.VariableTypeString { + return input, nil + } + + const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY" + inputWithSentinal := fmt.Sprintf("%s = %s", sentinelValue, input) + + var decoded map[string]interface{} + err := hcl.Decode(&decoded, inputWithSentinal) + if err != nil { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err) + } + + if len(decoded) != 1 { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. Only one value may be specified.", name, input) + } + + parsedValue, ok := decoded[sentinelValue] + if !ok { + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + } + + switch targetType { + case config.VariableTypeList: + return parsedValue, nil + case config.VariableTypeMap: + if list, ok := parsedValue.([]map[string]interface{}); ok { + return list[0], nil + } + + return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) + default: + panic(fmt.Errorf("unknown type %s", targetType)) + } +} diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index cc499a482..ce64ddfca 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -1135,7 +1135,11 @@ func TestContext2Apply_mapVariableOverride(t *testing.T) { "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ - "images.us-west-2": "overridden", + "images": []map[string]interface{}{ + map[string]interface{}{ + "us-west-2": "overridden", + }, + }, }, }) @@ -4269,8 +4273,18 @@ func TestContext2Apply_vars(t *testing.T) { "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ - "foo": "us-west-2", - "amis.us-east-1": "override", + "foo": "us-west-2", + "test_list": []interface{}{"Hello", "World"}, + "test_map": map[string]interface{}{ + "Hello": "World", + "Foo": "Bar", + "Baz": "Foo", + }, + "amis": []map[string]interface{}{ + map[string]interface{}{ + "us-east-1": "override", + }, + }, }, }) @@ -4300,8 +4314,13 @@ func TestContext2Apply_vars(t *testing.T) { func TestContext2Apply_varsEnv(t *testing.T) { // Set the env var - old := tempEnv(t, "TF_VAR_ami", "baz") - defer os.Setenv("TF_VAR_ami", old) + old_ami := tempEnv(t, "TF_VAR_ami", "baz") + old_list := tempEnv(t, "TF_VAR_list", `["Hello", "World"]`) + old_map := tempEnv(t, "TF_VAR_map", `{"Hello" = "World", "Foo" = "Bar", "Baz" = "Foo"}`) + + defer os.Setenv("TF_VAR_ami", old_ami) + defer os.Setenv("TF_VAR_list", old_list) + defer os.Setenv("TF_VAR_list", old_map) m := testModule(t, "apply-vars-env") p := testProvider("aws") diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index 9791b06fb..13e372469 100644 --- a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -19,8 +19,12 @@ func TestContext2Input(t *testing.T) { "aws": testProviderFuncFixed(p), }, Variables: map[string]interface{}{ - "foo": "us-west-2", - "amis.us-east-1": "override", + "foo": "us-west-2", + "amis": []map[string]interface{}{ + map[string]interface{}{ + "us-east-1": "override", + }, + }, }, UIInput: input, }) diff --git 
a/terraform/semantics.go b/terraform/semantics.go index 6d001226a..e8e52b7aa 100644 --- a/terraform/semantics.go +++ b/terraform/semantics.go @@ -95,16 +95,42 @@ func smcUserVariables(c *config.Config, vs map[string]interface{}) []error { } // Check that types match up - for k, _ := range vs { - v, ok := cvs[k] + for name, proposedValue := range vs { + schema, ok := cvs[name] if !ok { continue } - if v.Type() != config.VariableTypeString { - errs = append(errs, fmt.Errorf( - "%s: cannot assign string value to map type", - k)) + declaredType := schema.Type() + + switch declaredType { + case config.VariableTypeString: + switch proposedValue.(type) { + case string: + continue + default: + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) + } + case config.VariableTypeMap: + switch proposedValue.(type) { + case map[string]interface{}: + continue + default: + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) + } + case config.VariableTypeList: + switch proposedValue.(type) { + case []interface{}: + continue + default: + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) + } + default: + errs = append(errs, fmt.Errorf("variable %s should be type %s, got %s", + name, declaredType.Printable(), hclTypeName(proposedValue))) } } diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 6be32ea03..fbcf6c61e 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -705,6 +705,8 @@ aws_instance.bar: aws_instance.foo: ID = foo bar = baz + list = Hello,World + map = Baz,Foo,Hello num = 2 type = aws_instance ` @@ -712,6 +714,8 @@ aws_instance.foo: const testTerraformApplyVarsEnvStr = ` aws_instance.bar: ID = foo + bar = Hello,World + baz = Baz,Foo,Hello foo = baz type = aws_instance ` diff --git a/terraform/test-fixtures/apply-vars-env/main.tf b/terraform/test-fixtures/apply-vars-env/main.tf index b686da065..245564323 100644 --- a/terraform/test-fixtures/apply-vars-env/main.tf +++ b/terraform/test-fixtures/apply-vars-env/main.tf @@ -1,7 +1,20 @@ variable "ami" { - default = "foo" + default = "foo" + type = "string" +} + +variable "list" { + default = [] + type = "list" +} + +variable "map" { + default = {} + type = "map" } resource "aws_instance" "bar" { - foo = "${var.ami}" + foo = "${var.ami}" + bar = "${join(",", var.list)}" + baz = "${join(",", keys(var.map))}" } diff --git a/terraform/test-fixtures/apply-vars/main.tf b/terraform/test-fixtures/apply-vars/main.tf index 7cd4b5316..7c426b227 100644 --- a/terraform/test-fixtures/apply-vars/main.tf +++ b/terraform/test-fixtures/apply-vars/main.tf @@ -5,6 +5,14 @@ variable "amis" { } } +variable "test_list" { + type = "list" +} + +variable "test_map" { + type = "map" +} + variable "bar" { default = "baz" } @@ -14,6 +22,8 @@ variable "foo" {} resource "aws_instance" "foo" { num = "2" bar = "${var.bar}" + list = "${join(",", var.test_list)}" + map = "${join(",", keys(var.test_map))}" } resource "aws_instance" "bar" { From 4e1d54ad511525fe04df9e6b352450237a2539f8 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 26 Jul 2016 21:59:21 +0100 Subject: [PATCH 0429/1238] provider/aws: Change the resource name expected as part of sqs queue import test --- builtin/providers/aws/import_aws_sqs_queue_test.go | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git 
a/builtin/providers/aws/import_aws_sqs_queue_test.go b/builtin/providers/aws/import_aws_sqs_queue_test.go index 45787eb6b..d3e9028d5 100644 --- a/builtin/providers/aws/import_aws_sqs_queue_test.go +++ b/builtin/providers/aws/import_aws_sqs_queue_test.go @@ -10,7 +10,7 @@ import ( ) func TestAccAWSSQSQueue_importBasic(t *testing.T) { - resourceName := "aws_sqs_queue.queue-with-defaults" + resourceName := "aws_sqs_queue.queue" queueName := fmt.Sprintf("sqs-queue-%s", acctest.RandString(5)) resource.Test(t, resource.TestCase{ @@ -26,9 +26,6 @@ func TestAccAWSSQSQueue_importBasic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - //The name is never returned after the initial create of the queue. - //It is part of the URL and can be split down if needed - //ImportStateVerifyIgnore: []string{"name"}, }, }, }) From 5cacd788b5fbece2e18959922507fa288b4a71af Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 26 Jul 2016 22:01:01 +0100 Subject: [PATCH 0430/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 826b6ccf1..892ce5b8b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -239,6 +239,7 @@ BUG FIXES: * provider/aws: Ignore IOPS on non io1 AWS root_block_device [GH-7783] * provider/aws: Ignore missing ENI attachment when trying to detach ENI [GH-7185] * provider/aws: Fix issue updating ElasticBeanstalk Environment templates [GH-7811] + * provider/aws: Restore Defaults to SQS Queues [GH-7818] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 4b9b42dbf08953119186ba537efe3f4c7d36b698 Mon Sep 17 00:00:00 2001 From: Dan Allegood Date: Tue, 26 Jul 2016 16:25:56 -0700 Subject: [PATCH 0431/1238] Fixes the hasBootableVmdk flag when attaching multiple disks (#7804) The hasBootableFlag logic had a bug where it would only be set properly if the bootable disk was the last specified. Adding some bool logic resolves the issue. Also adding check to ensure only one bootable disk is given, and cleaning up a redundant var. 
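To make the boolean fix concrete, here is a distilled, self-contained sketch; the disk type and helper names are illustrative stand-ins rather than the provider's actual code. Plain assignment keeps only the last disk's flag, while OR-accumulation preserves an earlier bootable disk and makes the duplicate check possible:

package main

import (
	"errors"
	"fmt"
)

type disk struct{ bootable bool }

// buggy: the flag reflects only the last disk, so a bootable disk followed
// by a plain data disk ends up reporting "no bootable disk".
func hasBootableBuggy(disks []disk) bool {
	flag := false
	for _, d := range disks {
		flag = d.bootable
	}
	return flag
}

// fixed: reject a second bootable disk, then accumulate with OR so the flag
// survives later non-bootable entries.
func hasBootableFixed(disks []disk) (bool, error) {
	flag := false
	for _, d := range disks {
		if d.bootable && flag {
			return false, errors.New("only one bootable disk or template may be given")
		}
		flag = flag || d.bootable
	}
	return flag, nil
}

func main() {
	ds := []disk{{bootable: true}, {bootable: false}}
	fmt.Println(hasBootableBuggy(ds)) // false, which is the bug described above
	ok, _ := hasBootableFixed(ds)
	fmt.Println(ok) // true
}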
--- .../vsphere/resource_vsphere_virtual_machine.go | 11 ++++++----- .../vsphere/resource_vsphere_virtual_machine_test.go | 11 ++++++++--- 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index a946517d7..444e10877 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -789,7 +789,6 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{ if diskSet, ok := vL.(*schema.Set); ok { disks := []hardDisk{} - hasBootableDisk := false for _, value := range diskSet.List() { disk := value.(map[string]interface{}) newDisk := hardDisk{} @@ -799,10 +798,10 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Cannot specify name of a template") } vm.template = v - if hasBootableDisk { + if vm.hasBootableVmdk { return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") } - hasBootableDisk = true + vm.hasBootableVmdk = true } if v, ok := disk["type"].(string); ok && v != "" { @@ -846,9 +845,11 @@ func resourceVSphereVirtualMachineCreate(d *schema.ResourceData, meta interface{ return fmt.Errorf("Cannot specify name of a vmdk") } if vBootable, ok := disk["bootable"].(bool); ok { - hasBootableDisk = true + if vBootable && vm.hasBootableVmdk { + return fmt.Errorf("[ERROR] Only one bootable disk or template may be given") + } newDisk.bootable = vBootable - vm.hasBootableVmdk = vBootable + vm.hasBootableVmdk = vm.hasBootableVmdk || vBootable } newDisk.vmdkPath = vVmdk } diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index 3c063d006..56f2db226 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -606,7 +606,12 @@ resource "vsphere_virtual_machine" "with_existing_vmdk" { disk { %s vmdk = "%s" - bootable = true + bootable = true + } + disk { + size = 1 + iops = 500 + name = "one" } } ` @@ -635,7 +640,7 @@ func TestAccVSphereVirtualMachine_createWithExistingVmdk(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( TestFuncData{vm: vm, label: data.label, vmName: "vsphere_virtual_machine.with_existing_vmdk", - vmResource: "terraform-test-with-existing-vmdk"}.testCheckFuncBasic(), + vmResource: "terraform-test-with-existing-vmdk", numDisks: "2"}.testCheckFuncBasic(), //resource.TestCheckResourceAttr( // "vsphere_virtual_machine.with_existing_vmdk", "disk.2393891804.vmdk", vmdk_path), //resource.TestCheckResourceAttr( @@ -766,7 +771,7 @@ resource "vsphere_virtual_machine" "ipv4ipv6" { disk { size = 1 iops = 500 - name = "one" + name = "one" } } ` From 6263adaa136faaa03bceb44f881adc8d49875816 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 00:26:47 +0100 Subject: [PATCH 0432/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 892ce5b8b..a42b13b6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -278,6 +278,7 @@ BUG FIXES: * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional [GH-7410] * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller [GH-7167] * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete [GH-7206] + 
* provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks [GH-7804] * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs [GH-7413] ## 0.6.16 (May 9, 2016) From bef3b76c7ac7bc2afebc838a9489a5fc1aa0e0f6 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 01:06:23 +0100 Subject: [PATCH 0433/1238] docs/intro: Change the location of Atlas User Tokens in Intro docs (#7822) Fixes #5861 --- website/source/intro/getting-started/remote.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/remote.html.markdown b/website/source/intro/getting-started/remote.html.markdown index 9ad3e9279..95ab71fb1 100644 --- a/website/source/intro/getting-started/remote.html.markdown +++ b/website/source/intro/getting-started/remote.html.markdown @@ -31,7 +31,7 @@ or you can follow the outlined steps below. First, If you don't have an Atlas account, you can [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=terraform). -In order for the Terraform CLI to gain access to your Atlas account you're going to need to generate an access key. From the main menu, select your username in the top right corner to access your profile. Under `Personal`, click on the `Tokens` tab and hit generate. +In order for the Terraform CLI to gain access to your Atlas account you're going to need to generate an access key. From the main menu, select your username in the left side navigation menu to access your profile. Under `Personal`, click on the `Tokens` tab and hit generate. For the purposes of this tutorial you can use this token by exporting it to your local shell session: From de8726769731be455092567547f3c8aca7c05a54 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 25 Jul 2016 15:56:56 -0400 Subject: [PATCH 0434/1238] Add tf_vars to the variables sent in push Add tf_vars to the data structures sent in terraform push. This takes any value of type []interface{} or map[string]interface{} and marshals it as a string representation of the equivalent HCL. This prevents ambiguity in atlas between a string that looks like a json structure, and an actual json structure. For the time being we will need a way to serialize data as HCL, so the command package has an internal encodeHCL function to do so. We can remove this if we get complete package for marshaling HCL. --- command/hcl_printer.go | 114 ++++++++++++++++++++++ command/push.go | 68 ++++++++++++- command/push_test.go | 53 +++++++--- command/test-fixtures/push-tfvars/main.tf | 13 +++ 4 files changed, 233 insertions(+), 15 deletions(-) create mode 100644 command/hcl_printer.go diff --git a/command/hcl_printer.go b/command/hcl_printer.go new file mode 100644 index 000000000..dc4eb07d6 --- /dev/null +++ b/command/hcl_printer.go @@ -0,0 +1,114 @@ +package command + +// Marshal an object as an hcl value. +import ( + "bytes" + "fmt" + "regexp" + + "github.com/hashicorp/hcl/hcl/printer" +) + +// This will only work operate on []interface{}, map[string]interface{}, and +// primitive types. +func encodeHCL(i interface{}) ([]byte, error) { + + state := &encodeState{} + err := state.encode(i) + if err != nil { + return nil, err + } + + hcl := state.Bytes() + if len(hcl) == 0 { + return hcl, nil + } + + // the HCL parser requires an assignment. Strip it off again later + fakeAssignment := append([]byte("X = "), hcl...) 
+ + // use the real hcl parser to verify our output, and format it canonically + hcl, err = printer.Format(fakeAssignment) + + // now strip that first assignment off + eq := regexp.MustCompile(`=\s+`).FindIndex(hcl) + return hcl[eq[1]:], err +} + +type encodeState struct { + bytes.Buffer +} + +func (e *encodeState) encode(i interface{}) error { + switch v := i.(type) { + case []interface{}: + return e.encodeList(v) + + case map[string]interface{}: + return e.encodeMap(v) + + case int, int8, int32, int64, uint8, uint32, uint64: + return e.encodeInt(i) + + case float32, float64: + return e.encodeFloat(i) + + case string: + return e.encodeString(v) + + case nil: + return nil + + default: + return fmt.Errorf("invalid type %T", i) + } + +} + +func (e *encodeState) encodeList(l []interface{}) error { + e.WriteString("[") + for i, v := range l { + err := e.encode(v) + if err != nil { + return err + } + if i < len(l)-1 { + e.WriteString(", ") + } + } + e.WriteString("]") + return nil +} + +func (e *encodeState) encodeMap(m map[string]interface{}) error { + e.WriteString("{\n") + for i, k := range sortedKeys(m) { + v := m[k] + + e.WriteString(k + " = ") + err := e.encode(v) + if err != nil { + return err + } + if i < len(m)-1 { + e.WriteString("\n") + } + } + e.WriteString("}") + return nil +} + +func (e *encodeState) encodeString(s string) error { + _, err := fmt.Fprintf(e, "%q", s) + return err +} + +func (e *encodeState) encodeInt(i interface{}) error { + _, err := fmt.Fprintf(e, "%d", i) + return err +} + +func (e *encodeState) encodeFloat(f interface{}) error { + _, err := fmt.Fprintf(e, "%f", f) + return err +} diff --git a/command/push.go b/command/push.go index 1abe13e0b..557887b7c 100644 --- a/command/push.go +++ b/command/push.go @@ -88,6 +88,7 @@ func (c *PushCommand) Run(args []string) int { Path: configPath, StatePath: c.Meta.statePath, }) + if err != nil { c.Ui.Error(err.Error()) return 1 @@ -209,12 +210,23 @@ func (c *PushCommand) Run(args []string) int { c.Ui.Output("") } + variables := ctx.Variables() + serializedVars, err := tfVars(variables) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "An error has occurred while serializing the variables for uploading:\n"+ + "%s", err)) + return 1 + } + // Upsert! 
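+	// Illustration (values assumed for this note; they mirror the expectations
+	// in push_test.go below): tfVars serializes composite variables as HCL
+	// text, so a map variable such as
+	//     map[string]interface{}{"A": "a", "B": "b"}
+	// is uploaded as an atlas.TFVar whose Value is the string
+	//     {
+	//       A = "a"
+	//       B = "b"
+	//     }
+	// with IsHCL set to true, while plain string variables are passed through
+	// unchanged with IsHCL left false.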
opts := &pushUpsertOptions{ Name: name, Archive: archiveR, Variables: ctx.Variables(), + TFVars: serializedVars, } + c.Ui.Output("Uploading Terraform configuration...") vsn, err := c.client.Upsert(opts) if err != nil { @@ -272,6 +284,58 @@ Options: return strings.TrimSpace(helpText) } +func sortedKeys(m map[string]interface{}) []string { + var keys []string + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// build the set of TFVars for push +func tfVars(vars map[string]interface{}) ([]atlas.TFVar, error) { + var tfVars []atlas.TFVar + var err error + +RANGE: + for _, k := range sortedKeys(vars) { + v := vars[k] + + var hcl []byte + tfv := atlas.TFVar{Key: k} + + switch v := v.(type) { + case string: + tfv.Value = v + + case []interface{}: + hcl, err = encodeHCL(v) + if err != nil { + break RANGE + } + + tfv.Value = string(hcl) + tfv.IsHCL = true + + case map[string]interface{}: + hcl, err = encodeHCL(v) + if err != nil { + break RANGE + } + + tfv.Value = string(hcl) + tfv.IsHCL = true + default: + err = fmt.Errorf("unknown type %T for variable %s", v, k) + } + + tfVars = append(tfVars, tfv) + } + + return tfVars, err +} + func (c *PushCommand) Synopsis() string { return "Upload this Terraform module to Atlas to run" } @@ -287,6 +351,7 @@ type pushUpsertOptions struct { Name string Archive *archive.Archive Variables map[string]interface{} + TFVars []atlas.TFVar } type atlasPushClient struct { @@ -306,6 +371,7 @@ func (c *atlasPushClient) Get(name string) (map[string]interface{}, error) { var variables map[string]interface{} if version != nil { + // TODO: merge variables and TFVars //variables = version.Variables } @@ -319,7 +385,7 @@ func (c *atlasPushClient) Upsert(opts *pushUpsertOptions) (int, error) { } data := &atlas.TerraformConfigVersion{ - //Variables: opts.Variables, + TFVars: opts.TFVars, } version, err := c.Client.CreateTerraformConfigVersion( diff --git a/command/push_test.go b/command/push_test.go index 04dfd7fe5..53db5cbd7 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -10,6 +10,7 @@ import ( "sort" "testing" + atlas "github.com/hashicorp/atlas-go/v1" "github.com/hashicorp/terraform/terraform" "github.com/mitchellh/cli" ) @@ -247,10 +248,8 @@ func TestPush_localOverride(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]interface{}{ - "foo": "bar", - "bar": "foo", - } + variables := pushTFVars() + if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { t.Fatalf("bad: %#v", client.UpsertOptions) } @@ -323,12 +322,11 @@ func TestPush_preferAtlas(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]interface{}{ - "foo": "old", - "bar": "foo", - } + variables := pushTFVars() + variables["foo"] = "old" + if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { - t.Fatalf("bad: %#v", client.UpsertOptions) + t.Fatalf("bad: %#v", client.UpsertOptions.Variables) } } @@ -394,12 +392,26 @@ func TestPush_tfvars(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := map[string]interface{}{ - "foo": "bar", - "bar": "foo", + variables := pushTFVars() + + // make sure these dind't go missing for some reason + for k, v := range variables { + if !reflect.DeepEqual(client.UpsertOptions.Variables[k], v) { + t.Fatalf("bad: %#v", client.UpsertOptions.Variables) + } } - if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { - t.Fatalf("bad: %#v", client.UpsertOptions) + + //now check TFVVars + + tfvars := []atlas.TFVar{ 
+ {"bar", "foo", false}, + {"baz", "{\n A = \"a\"\n B = \"b\"\n}\n", true}, + {"fob", "[\"a\", \"b\", \"c\"]\n", true}, + {"foo", "bar", false}, + } + + if !reflect.DeepEqual(client.UpsertOptions.TFVars, tfvars) { + t.Fatalf("bad tf_vars: %#v", client.UpsertOptions.TFVars) } } @@ -563,3 +575,16 @@ func testArchiveStr(t *testing.T, path string) []string { sort.Strings(result) return result } + +// the structure returned from the push-tfvars test fixture +func pushTFVars() map[string]interface{} { + return map[string]interface{}{ + "foo": "bar", + "bar": "foo", + "baz": map[string]interface{}{ + "A": "a", + "B": "b", + }, + "fob": []interface{}{"a", "b", "c"}, + } +} diff --git a/command/test-fixtures/push-tfvars/main.tf b/command/test-fixtures/push-tfvars/main.tf index 8285c1ada..eef57acb5 100644 --- a/command/test-fixtures/push-tfvars/main.tf +++ b/command/test-fixtures/push-tfvars/main.tf @@ -1,6 +1,19 @@ variable "foo" {} variable "bar" {} +variable "baz" { + type = "map" + default = { + "A" = "a" + "B" = "b" + } +} + +variable "fob" { + type = "list" + default = ["a", "b", "c"] +} + resource "test_instance" "foo" {} atlas { From 648fff9ba1ab6a8af43424157fafdd890c170ed8 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 26 Jul 2016 12:43:05 -0400 Subject: [PATCH 0435/1238] Update the atlas-go client adds the new TFVars field --- vendor/github.com/hashicorp/atlas-go/v1/terraform.go | 11 ++++++++++- vendor/vendor.json | 6 +++--- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go index adeba2a11..debd1d319 100644 --- a/vendor/github.com/hashicorp/atlas-go/v1/terraform.go +++ b/vendor/github.com/hashicorp/atlas-go/v1/terraform.go @@ -14,7 +14,16 @@ type TerraformConfigVersion struct { Version int Remotes []string `json:"remotes"` Metadata map[string]string `json:"metadata"` - Variables map[string]string `json:"variables"` + Variables map[string]string `json:"variables,omitempty"` + TFVars []TFVar `json:"tf_vars"` +} + +// TFVar is used to serialize a single Terraform variable sent by the +// manager as a collection of Variables in a Job payload. +type TFVar struct { + Key string `json:"key"` + Value string `json:"value"` + IsHCL bool `json:"hcl"` } // TerraformConfigLatest returns the latest Terraform configuration version. diff --git a/vendor/vendor.json b/vendor/vendor.json index db04e37e7..bacde77c5 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1060,11 +1060,11 @@ "revision": "95fa852edca41c06c4ce526af4bb7dec4eaad434" }, { - "checksumSHA1": "EWGfo74RcoKaYFZNSkvzYRJMgrY=", + "checksumSHA1": "yylO3hSRKd0T4mveT9ho2OSARwU=", "comment": "20141209094003-92-g95fa852", "path": "github.com/hashicorp/atlas-go/v1", - "revision": "c8b26aa95f096efc0f378b2d2830ca909631d584", - "revisionTime": "2016-07-22T13:58:36Z" + "revision": "9be9a611a15ba2f857a99b332fd966896867299a", + "revisionTime": "2016-07-26T16:33:11Z" }, { "comment": "v0.6.3-28-g3215b87", From 8038e60a20865e0a72dbb9107ebf3676a5d4b9d9 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 26 Jul 2016 20:37:41 -0400 Subject: [PATCH 0436/1238] Add a function to quote HCL strings The strings we have in the variables may contain escaped double-quotes, which have already been parsed and had the `\`s removed. We need to re-escape these, but only if we are in the outer string and not inside an interpolation. 
--- command/hcl_printer.go | 89 +++++++++++++++++++++-- command/push_test.go | 29 +++++--- command/test-fixtures/push-tfvars/main.tf | 19 +++-- 3 files changed, 113 insertions(+), 24 deletions(-) diff --git a/command/hcl_printer.go b/command/hcl_printer.go index dc4eb07d6..3c547c505 100644 --- a/command/hcl_printer.go +++ b/command/hcl_printer.go @@ -29,10 +29,14 @@ func encodeHCL(i interface{}) ([]byte, error) { // use the real hcl parser to verify our output, and format it canonically hcl, err = printer.Format(fakeAssignment) + if err != nil { + return nil, err + } // now strip that first assignment off eq := regexp.MustCompile(`=\s+`).FindIndex(hcl) - return hcl[eq[1]:], err + + return hcl[eq[1]:], nil } type encodeState struct { @@ -98,11 +102,6 @@ func (e *encodeState) encodeMap(m map[string]interface{}) error { return nil } -func (e *encodeState) encodeString(s string) error { - _, err := fmt.Fprintf(e, "%q", s) - return err -} - func (e *encodeState) encodeInt(i interface{}) error { _, err := fmt.Fprintf(e, "%d", i) return err @@ -112,3 +111,81 @@ func (e *encodeState) encodeFloat(f interface{}) error { _, err := fmt.Fprintf(e, "%f", f) return err } + +func (e *encodeState) encodeString(s string) error { + e.Write(quoteHCLString(s)) + return nil +} + +// Quote an HCL string, which may contain interpolations. +// Since the string was already parsed from HCL, we have to assume the +// required characters are sanely escaped. All we need to do is escape double +// quotes in the string, unless they are in an interpolation block. +func quoteHCLString(s string) []byte { + out := make([]byte, 0, len(s)) + out = append(out, '"') + + // our parse states + var ( + outer = 1 // the starting state for the string + dollar = 2 // look for '{' in the next character + interp = 3 // inside an interpolation block + escape = 4 // take the next character and pop back to prev state + ) + + // we could have nested interpolations + state := stack{} + state.push(outer) + + for i := 0; i < len(s); i++ { + switch state.peek() { + case outer: + switch s[i] { + case '"': + out = append(out, '\\') + case '$': + state.push(dollar) + case '\\': + state.push(escape) + } + case dollar: + state.pop() + switch s[i] { + case '{': + state.push(interp) + case '\\': + state.push(escape) + } + case interp: + switch s[i] { + case '}': + state.pop() + } + case escape: + state.pop() + } + + out = append(out, s[i]) + } + + out = append(out, '"') + + return out +} + +type stack []int + +func (s *stack) push(i int) { + *s = append(*s, i) +} + +func (s *stack) pop() int { + last := len(*s) - 1 + i := (*s)[last] + *s = (*s)[:last] + return i +} + +func (s *stack) peek() int { + return (*s)[len(*s)-1] +} diff --git a/command/push_test.go b/command/push_test.go index 53db5cbd7..3db6f7739 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -397,21 +397,29 @@ func TestPush_tfvars(t *testing.T) { // make sure these dind't go missing for some reason for k, v := range variables { if !reflect.DeepEqual(client.UpsertOptions.Variables[k], v) { - t.Fatalf("bad: %#v", client.UpsertOptions.Variables) + t.Fatalf("bad: %#v", client.UpsertOptions.Variables[k]) } } - //now check TFVVars - + //now check TFVars tfvars := []atlas.TFVar{ {"bar", "foo", false}, - {"baz", "{\n A = \"a\"\n B = \"b\"\n}\n", true}, - {"fob", "[\"a\", \"b\", \"c\"]\n", true}, + {"baz", `{ + A = "a" + B = "b" + interp = "${file("t.txt")}" +} +`, true}, + {"fob", `["a", "b", "c", "quotes \"in\" quotes"]` + "\n", true}, {"foo", "bar", false}, } - if 
!reflect.DeepEqual(client.UpsertOptions.TFVars, tfvars) { - t.Fatalf("bad tf_vars: %#v", client.UpsertOptions.TFVars) + for i, expected := range tfvars { + got := client.UpsertOptions.TFVars[i] + if got != expected { + t.Logf("%2d expected: %#v", i, expected) + t.Logf(" got: %#v", got) + } } } @@ -582,9 +590,10 @@ func pushTFVars() map[string]interface{} { "foo": "bar", "bar": "foo", "baz": map[string]interface{}{ - "A": "a", - "B": "b", + "A": "a", + "B": "b", + "interp": `${file("t.txt")}`, }, - "fob": []interface{}{"a", "b", "c"}, + "fob": []interface{}{"a", "b", "c", `quotes "in" quotes`}, } } diff --git a/command/test-fixtures/push-tfvars/main.tf b/command/test-fixtures/push-tfvars/main.tf index eef57acb5..c98389f28 100644 --- a/command/test-fixtures/push-tfvars/main.tf +++ b/command/test-fixtures/push-tfvars/main.tf @@ -1,21 +1,24 @@ variable "foo" {} + variable "bar" {} variable "baz" { - type = "map" - default = { - "A" = "a" - "B" = "b" - } + type = "map" + + default = { + "A" = "a" + "B" = "b" + interp = "${file("t.txt")}" + } } variable "fob" { - type = "list" - default = ["a", "b", "c"] + type = "list" + default = ["a", "b", "c", "quotes \"in\" quotes"] } resource "test_instance" "foo" {} atlas { - name = "foo" + name = "foo" } From 393863a5a907ebe8e844c8596840f24b51e0fc1e Mon Sep 17 00:00:00 2001 From: Sander van Harmelen Date: Wed, 27 Jul 2016 17:30:18 +0200 Subject: [PATCH 0437/1238] Add project parameter to additional resources (#7828) --- .../resource_cloudstack_loadbalancer_rule.go | 14 +++++++++++++- .../resource_cloudstack_static_nat.go | 19 +++++++++++++++++-- .../r/loadbalancer_rule.html.markdown | 3 +++ .../cloudstack/r/static_nat.html.markdown | 3 +++ 4 files changed, 36 insertions(+), 3 deletions(-) diff --git a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go b/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go index c7e82fb92..2c3c25334 100644 --- a/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go +++ b/builtin/providers/cloudstack/resource_cloudstack_loadbalancer_rule.go @@ -63,6 +63,13 @@ func resourceCloudStackLoadBalancerRule() *schema.Resource { ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, }, } } @@ -138,7 +145,10 @@ func resourceCloudStackLoadBalancerRuleRead(d *schema.ResourceData, meta interfa cs := meta.(*cloudstack.CloudStackClient) // Get the load balancer details - lb, count, err := cs.LoadBalancer.GetLoadBalancerRuleByID(d.Id()) + lb, count, err := cs.LoadBalancer.GetLoadBalancerRuleByID( + d.Id(), + cloudstack.WithProject(d.Get("project").(string)), + ) if err != nil { if count == 0 { log.Printf("[DEBUG] Load balancer rule %s does no longer exist", d.Get("name").(string)) @@ -159,6 +169,8 @@ func resourceCloudStackLoadBalancerRuleRead(d *schema.ResourceData, meta interfa d.Set("network_id", lb.Networkid) } + setValueOrID(d, "project", lb.Project, lb.Projectid) + return nil } diff --git a/builtin/providers/cloudstack/resource_cloudstack_static_nat.go b/builtin/providers/cloudstack/resource_cloudstack_static_nat.go index b96991eef..ca8d4691f 100644 --- a/builtin/providers/cloudstack/resource_cloudstack_static_nat.go +++ b/builtin/providers/cloudstack/resource_cloudstack_static_nat.go @@ -42,6 +42,13 @@ func resourceCloudStackStaticNAT() *schema.Resource { Computed: true, ForceNew: true, }, + + "project": &schema.Schema{ + Type: schema.TypeString, + 
Optional: true, + Computed: true, + ForceNew: true, + }, }, } } @@ -77,7 +84,10 @@ func resourceCloudStackStaticNATExists(d *schema.ResourceData, meta interface{}) cs := meta.(*cloudstack.CloudStackClient) // Get the IP address details - ip, count, err := cs.Address.GetPublicIpAddressByID(d.Id()) + ip, count, err := cs.Address.GetPublicIpAddressByID( + d.Id(), + cloudstack.WithProject(d.Get("project").(string)), + ) if err != nil { if count == 0 { log.Printf("[DEBUG] IP address with ID %s no longer exists", d.Id()) @@ -94,7 +104,10 @@ func resourceCloudStackStaticNATRead(d *schema.ResourceData, meta interface{}) e cs := meta.(*cloudstack.CloudStackClient) // Get the IP address details - ip, count, err := cs.Address.GetPublicIpAddressByID(d.Id()) + ip, count, err := cs.Address.GetPublicIpAddressByID( + d.Id(), + cloudstack.WithProject(d.Get("project").(string)), + ) if err != nil { if count == 0 { log.Printf("[DEBUG] IP address with ID %s no longer exists", d.Id()) @@ -115,6 +128,8 @@ func resourceCloudStackStaticNATRead(d *schema.ResourceData, meta interface{}) e d.Set("virtual_machine_id", ip.Virtualmachineid) d.Set("vm_guest_ip", ip.Vmipaddress) + setValueOrID(d, "project", ip.Project, ip.Projectid) + return nil } diff --git a/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown b/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown index 2b87f5780..711c67717 100644 --- a/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown +++ b/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown @@ -55,6 +55,9 @@ The following arguments are supported: * `member_ids` - (Required) List of instance IDs to assign to the load balancer rule. Changing this forces a new resource to be created. +* `project` - (Optional) The name or ID of the project to deploy this + instance to. Changing this forces a new resource to be created. + ## Attributes Reference The following attributes are exported: diff --git a/website/source/docs/providers/cloudstack/r/static_nat.html.markdown b/website/source/docs/providers/cloudstack/r/static_nat.html.markdown index 2f7caf1ab..a7b2a342b 100644 --- a/website/source/docs/providers/cloudstack/r/static_nat.html.markdown +++ b/website/source/docs/providers/cloudstack/r/static_nat.html.markdown @@ -38,6 +38,9 @@ The following arguments are supported: forwarding rule (useful when the virtual machine has a secondairy NIC). Changing this forces a new resource to be created. +* `project` - (Optional) The name or ID of the project to deploy this + instance to. Changing this forces a new resource to be created. + ## Attributes Reference The following attributes are exported: From 142f689f96febd5f173e93fbae66cf3a66eb0fca Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 27 Jul 2016 11:42:21 -0400 Subject: [PATCH 0438/1238] Remove unused variables These variables weren't used, but the compiler misses them since they are captured in a closure. 
--- builtin/providers/aws/resource_aws_appautoscaling_target.go | 3 +-- builtin/providers/aws/resource_aws_opsworks_permission.go | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_appautoscaling_target.go b/builtin/providers/aws/resource_aws_appautoscaling_target.go index 705fee465..0f15b9421 100644 --- a/builtin/providers/aws/resource_aws_appautoscaling_target.go +++ b/builtin/providers/aws/resource_aws_appautoscaling_target.go @@ -88,10 +88,9 @@ func resourceAwsAppautoscalingTargetCreate(d *schema.ResourceData, meta interfac targetOpts.ServiceNamespace = aws.String(d.Get("service_namespace").(string)) log.Printf("[DEBUG] Application autoscaling target create configuration %#v", targetOpts) - var out *applicationautoscaling.RegisterScalableTargetOutput var err error err = resource.Retry(1*time.Minute, func() *resource.RetryError { - out, err = conn.RegisterScalableTarget(&targetOpts) + _, err = conn.RegisterScalableTarget(&targetOpts) if err != nil { if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ValidationException" { diff --git a/builtin/providers/aws/resource_aws_opsworks_permission.go b/builtin/providers/aws/resource_aws_opsworks_permission.go index 7493a4c20..5b7833eb7 100644 --- a/builtin/providers/aws/resource_aws_opsworks_permission.go +++ b/builtin/providers/aws/resource_aws_opsworks_permission.go @@ -131,10 +131,9 @@ func resourceAwsOpsworksPermissionCreate(d *schema.ResourceData, meta interface{ StackId: aws.String(d.Get("stack_id").(string)), } - var resp *opsworks.SetPermissionOutput err := resource.Retry(2*time.Minute, func() *resource.RetryError { var cerr error - resp, cerr = client.SetPermission(req) + _, cerr = client.SetPermission(req) if cerr != nil { log.Printf("[INFO] client error") if opserr, ok := cerr.(awserr.Error); ok { From 8ed549c3b5279a871b1935397e539ceef7092596 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 16:47:25 +0100 Subject: [PATCH 0439/1238] provider/aws: Don't delete Lambda function from state on initial call of (#7829) the Read func Fixes #7782 Lambda functions are eventually consistent :( Therefore, when we move from the Create func to the Read func, there is a chance that the Lambda hasn't replicated yet and we could therefore find that it doesn't exist and delete it as follows: ``` params := &lambda.GetFunctionInput{ FunctionName: aws.String(d.Get("function_name").(string)), } getFunctionOutput, err := conn.GetFunction(params) if err != nil { if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { d.SetId("") return nil } return err } ``` This PR uses `d.IsNewResource()` to check if the Read is being called after a Create and therefore, won't delete the lambda if not found. This should allow the lambda to replicate ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSLambdaFunction_' => Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSLambdaFunction_ -timeout 120m === RUN TestAccAWSLambdaFunction_importLocalFile --- PASS: TestAccAWSLambdaFunction_importLocalFile (36.64s) === RUN TestAccAWSLambdaFunction_importLocalFile_VPC --- PASS: TestAccAWSLambdaFunction_importLocalFile_VPC (45.17s) === RUN TestAccAWSLambdaFunction_importS3 --- PASS: TestAccAWSLambdaFunction_importS3 (40.88s) === RUN TestAccAWSLambdaFunction_basic --- PASS: TestAccAWSLambdaFunction_basic (44.77s) === RUN TestAccAWSLambdaFunction_VPC --- PASS: TestAccAWSLambdaFunction_VPC (44.13s) === RUN TestAccAWSLambdaFunction_s3 --- PASS: TestAccAWSLambdaFunction_s3 (43.62s) === RUN TestAccAWSLambdaFunction_localUpdate --- PASS: TestAccAWSLambdaFunction_localUpdate (33.49s) === RUN TestAccAWSLambdaFunction_localUpdate_nameOnly --- PASS: TestAccAWSLambdaFunction_localUpdate_nameOnly (51.83s) === RUN TestAccAWSLambdaFunction_s3Update --- PASS: TestAccAWSLambdaFunction_s3Update (106.49s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 447.055s ``` Thanks to @radeksimko for pointing out `d.IsNewResource()` --- builtin/providers/aws/resource_aws_lambda_function.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go index dee086c6a..486f45d2e 100644 --- a/builtin/providers/aws/resource_aws_lambda_function.go +++ b/builtin/providers/aws/resource_aws_lambda_function.go @@ -237,7 +237,7 @@ func resourceAwsLambdaFunctionRead(d *schema.ResourceData, meta interface{}) err getFunctionOutput, err := conn.GetFunction(params) if err != nil { - if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ResourceNotFoundException" && !d.IsNewResource() { d.SetId("") return nil } From a09c7f329b722cddfc062c15c0f295564f7792fb Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 16:48:18 +0100 Subject: [PATCH 0440/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a42b13b6c..888c6ff57 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -240,6 +240,7 @@ BUG FIXES: * provider/aws: Ignore missing ENI attachment when trying to detach ENI [GH-7185] * provider/aws: Fix issue updating ElasticBeanstalk Environment templates [GH-7811] * provider/aws: Restore Defaults to SQS Queues [GH-7818] + * provider/aws: Don't delete Lambda function from state on initial call of the Read func [GH-7829] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From f0021e1f03edeb61140738d68ecd415756d89317 Mon Sep 17 00:00:00 2001 From: clint shryock Date: Wed, 27 Jul 2016 11:07:37 -0500 Subject: [PATCH 0441/1238] provider/aws: Seperate out TestAccAWSDBInstance_iops_update into it's own run This will seperate the TestAccAWSDBInstance_iops_update test into it's own namespace for testing by itself in Travis --- builtin/providers/aws/resource_aws_db_instance_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go 
b/builtin/providers/aws/resource_aws_db_instance_test.go index 36bf6e508..e02d8bb1d 100644 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ b/builtin/providers/aws/resource_aws_db_instance_test.go @@ -179,7 +179,7 @@ func TestAccAWSDBInstance_enhancedMonitoring(t *testing.T) { // Regression test for https://github.com/hashicorp/terraform/issues/3760 . // We apply a plan, then change just the iops. If the apply succeeds, we // consider this a pass, as before in 3760 the request would fail -func TestAccAWSDBInstance_iops_update(t *testing.T) { +func TestAccAWS_seperate_DBInstance_iops_update(t *testing.T) { var v rds.DBInstance rName := acctest.RandString(5) @@ -203,11 +203,6 @@ func TestAccAWSDBInstance_iops_update(t *testing.T) { testAccCheckAWSDBInstanceExists("aws_db_instance.bar", &v), testAccCheckAWSDBInstanceAttributes(&v), ), - // The plan will be non-empty because even with apply_immediatley, the - // instance has to apply the change via reboot, so follow up plans will - // show a non empty plan. The test is considered "successful" if the - // follow up change is applied at all. - ExpectNonEmptyPlan: true, }, }, }) From b4b70193d27c7e118c315bd90082740920b57756 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 27 Jul 2016 12:08:59 -0400 Subject: [PATCH 0442/1238] whitespace fixes --- command/hcl_printer.go | 1 - command/test-fixtures/push-tfvars/main.tf | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/command/hcl_printer.go b/command/hcl_printer.go index 3c547c505..1537fff14 100644 --- a/command/hcl_printer.go +++ b/command/hcl_printer.go @@ -12,7 +12,6 @@ import ( // This will only work operate on []interface{}, map[string]interface{}, and // primitive types. func encodeHCL(i interface{}) ([]byte, error) { - state := &encodeState{} err := state.encode(i) if err != nil { diff --git a/command/test-fixtures/push-tfvars/main.tf b/command/test-fixtures/push-tfvars/main.tf index c98389f28..528b6ed60 100644 --- a/command/test-fixtures/push-tfvars/main.tf +++ b/command/test-fixtures/push-tfvars/main.tf @@ -6,9 +6,9 @@ variable "baz" { type = "map" default = { - "A" = "a" - "B" = "b" - interp = "${file("t.txt")}" + "A" = "a" + "B" = "b" + interp = "${file("t.txt")}" } } From 1425b34562a2ee85370953b6533167b5ad9719ab Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 27 Jul 2016 12:42:46 -0500 Subject: [PATCH 0443/1238] config: Add map() interpolation function * `map(key, value, ...)` - Returns a map consisting of the key/value pairs specified as arguments. Every odd argument must be a string key, and every even argument must have the same type as the other values specified. Duplicate keys are not allowed. 
Examples: * `map("hello", "world")` * `map("us-east", list("a", "b", "c"), "us-west", list("b", "c", "d"))` --- config/interpolate_funcs.go | 45 +++++++++++ config/interpolate_funcs_test.go | 75 +++++++++++++++++++ .../docs/configuration/interpolation.html.md | 7 ++ 3 files changed, 127 insertions(+) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 91aa66da7..45f8e8915 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -71,6 +71,7 @@ func Funcs() map[string]ast.Function { "length": interpolationFuncLength(), "list": interpolationFuncList(), "lower": interpolationFuncLower(), + "map": interpolationFuncMap(), "md5": interpolationFuncMd5(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), @@ -123,6 +124,50 @@ func interpolationFuncList() ast.Function { } } +// interpolationFuncMap creates a map from the parameters passed +// to it. +func interpolationFuncMap() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeAny, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + if len(args)%2 != 0 { + return nil, fmt.Errorf("requires an even number of arguments, got %d", len(args)) + } + + var firstType *ast.Type + for i := 0; i < len(args); i += 2 { + key, ok := args[i].(string) + if !ok { + return nil, fmt.Errorf("argument %d represents a key, so it must be a string", i+1) + } + val := args[i+1] + variable, err := hil.InterfaceToVariable(val) + if err != nil { + return nil, err + } + // Enforce map type homogeneity + if firstType == nil { + firstType = &variable.Type + } else if variable.Type != *firstType { + return nil, fmt.Errorf("all map values must have the same type, got %s then %s", firstType.Printable(), variable.Type.Printable()) + } + // Check for duplicate keys + if _, ok := outputMap[key]; ok { + return nil, fmt.Errorf("argument %d is a duplicate key: %q", i+1, key) + } + outputMap[key] = variable + } + + return outputMap, nil + }, + } +} + // interpolationFuncCompact strips a list of multi-variable values // (e.g. as returned by "split") of any empty strings. 
func interpolationFuncCompact() ast.Function { diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 541bcffab..dcc563ecb 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -113,6 +113,81 @@ func TestInterpolateFuncList(t *testing.T) { }) } +func TestInterpolateFuncMap(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + // empty input returns empty map + { + `${map()}`, + map[string]interface{}{}, + false, + }, + + // odd args is error + { + `${map("odd")}`, + nil, + true, + }, + + // two args returns map w/ one k/v + { + `${map("hello", "world")}`, + map[string]interface{}{"hello": "world"}, + false, + }, + + // four args get two k/v + { + `${map("hello", "world", "what's", "up?")}`, + map[string]interface{}{"hello": "world", "what's": "up?"}, + false, + }, + + // map of lists is okay + { + `${map("hello", list("world"), "what's", list("up?"))}`, + map[string]interface{}{ + "hello": []interface{}{"world"}, + "what's": []interface{}{"up?"}, + }, + false, + }, + + // map of maps is okay + { + `${map("hello", map("there", "world"), "what's", map("really", "up?"))}`, + map[string]interface{}{ + "hello": map[string]interface{}{"there": "world"}, + "what's": map[string]interface{}{"really": "up?"}, + }, + false, + }, + + // keys have to be strings + { + `${map(list("listkey"), "val")}`, + nil, + true, + }, + + // types have to match + { + `${map("some", "strings", "also", list("lists"))}`, + nil, + true, + }, + + // duplicate keys are an error + { + `${map("key", "val", "key", "again")}`, + nil, + true, + }, + }, + }) +} + func TestInterpolateFuncCompact(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 338550377..ff555f2e6 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -181,6 +181,13 @@ The supported built-in functions are: * `lower(string)` - Returns a copy of the string with all Unicode letters mapped to their lower case. + * `map(key, value, ...)` - Returns a map consisting of the key/value pairs + specified as arguments. Every odd argument must be a string key, and every + even argument must have the same type as the other values specified. + Duplicate keys are not allowed. Examples: + * `map("hello", "world")` + * `map("us-east", list("a", "b", "c"), "us-west", list("b", "c", "d"))` + * `md5(string)` - Returns a (conventional) hexadecimal representation of the MD5 hash of the given string. From 3f83f0b9f9f58af9f9b10de8e78407e5c8fb63de Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:16:32 +0100 Subject: [PATCH 0444/1238] provider/aws: Enable Redshift Cluster Logging (#7813) Fixes #7423 ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftCluster_loggingEnabled' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRedshiftCluster_loggingEnabled -timeout 120m === RUN TestAccAWSRedshiftCluster_loggingEnabled --- PASS: TestAccAWSRedshiftCluster_loggingEnabled (675.21s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 675.233s ``` --- .../aws/resource_aws_redshift_cluster.go | 86 +++++++++++++++++ .../aws/resource_aws_redshift_cluster_test.go | 93 +++++++++++++++++++ .../aws/r/redshift_cluster.html.markdown | 4 + 3 files changed, 183 insertions(+) diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go index 6ca041389..af6eda093 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster.go @@ -207,6 +207,24 @@ func resourceAwsRedshiftCluster() *schema.Resource { Set: schema.HashString, }, + "enable_logging": { + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + + "bucket_name": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "s3_key_prefix": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "tags": tagsSchema(), }, } @@ -310,6 +328,16 @@ func resourceAwsRedshiftClusterCreate(d *schema.ResourceData, meta interface{}) return fmt.Errorf("[WARN] Error waiting for Redshift Cluster state to be \"available\": %s", err) } + if _, ok := d.GetOk("enable_logging"); ok { + + loggingErr := enableRedshiftClusterLogging(d, conn) + if loggingErr != nil { + log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", err) + return loggingErr + } + + } + return resourceAwsRedshiftClusterRead(d, meta) } @@ -346,6 +374,15 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er return nil } + log.Printf("[INFO] Reading Redshift Cluster Logging Status: %s", d.Id()) + loggingStatus, loggingErr := conn.DescribeLoggingStatus(&redshift.DescribeLoggingStatusInput{ + ClusterIdentifier: aws.String(d.Id()), + }) + + if loggingErr != nil { + return loggingErr + } + d.Set("master_username", rsc.MasterUsername) d.Set("node_type", rsc.NodeType) d.Set("allow_version_upgrade", rsc.AllowVersionUpgrade) @@ -404,6 +441,10 @@ func resourceAwsRedshiftClusterRead(d *schema.ResourceData, meta interface{}) er d.Set("cluster_revision_number", rsc.ClusterRevisionNumber) d.Set("tags", tagsToMapRedshift(rsc.Tags)) + d.Set("bucket_name", loggingStatus.BucketName) + d.Set("enable_logging", loggingStatus.LoggingEnabled) + d.Set("s3_key_prefix", loggingStatus.S3KeyPrefix) + return nil } @@ -553,11 +594,56 @@ func resourceAwsRedshiftClusterUpdate(d *schema.ResourceData, meta interface{}) } } + if d.HasChange("enable_logging") || d.HasChange("bucket_name") || d.HasChange("s3_key_prefix") { + var loggingErr error + if _, ok := d.GetOk("enable_logging"); ok { + + log.Printf("[INFO] Enabling Logging for Redshift Cluster %q", d.Id()) + loggingErr = enableRedshiftClusterLogging(d, conn) + if loggingErr != nil { + return loggingErr + } + } else { + + log.Printf("[INFO] Disabling Logging for Redshift Cluster %q", d.Id()) + _, loggingErr = conn.DisableLogging(&redshift.DisableLoggingInput{ + ClusterIdentifier: aws.String(d.Id()), + }) + if loggingErr != nil { + return loggingErr + } + } + + d.SetPartial("enable_logging") + } + d.Partial(false) return resourceAwsRedshiftClusterRead(d, meta) } +func enableRedshiftClusterLogging(d *schema.ResourceData, conn *redshift.Redshift) error { + if _, ok := d.GetOk("bucket_name"); !ok { + return 
fmt.Errorf("bucket_name must be set when enabling logging for Redshift Clusters") + } + + params := &redshift.EnableLoggingInput{ + ClusterIdentifier: aws.String(d.Id()), + BucketName: aws.String(d.Get("bucket_name").(string)), + } + + if v, ok := d.GetOk("s3_key_prefix"); ok { + params.S3KeyPrefix = aws.String(v.(string)) + } + + _, loggingErr := conn.EnableLogging(params) + if loggingErr != nil { + log.Printf("[ERROR] Error Enabling Logging on Redshift Cluster: %s", loggingErr) + return loggingErr + } + return nil +} + func resourceAwsRedshiftClusterDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).redshiftconn log.Printf("[DEBUG] Destroying Redshift Cluster (%s)", d.Id()) diff --git a/builtin/providers/aws/resource_aws_redshift_cluster_test.go b/builtin/providers/aws/resource_aws_redshift_cluster_test.go index 2a94b9123..4be682136 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster_test.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster_test.go @@ -38,6 +38,41 @@ func TestAccAWSRedshiftCluster_basic(t *testing.T) { }) } +func TestAccAWSRedshiftCluster_loggingEnabled(t *testing.T) { + var v redshift.Cluster + + ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int() + preConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_loggingEnabled, ri) + postConfig := fmt.Sprintf(testAccAWSRedshiftClusterConfig_loggingDisabled, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRedshiftClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), + resource.TestCheckResourceAttr( + "aws_redshift_cluster.default", "enable_logging", "true"), + resource.TestCheckResourceAttr( + "aws_redshift_cluster.default", "bucket_name", "tf-redshift-logging-test-bucket"), + ), + }, + + resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSRedshiftClusterExists("aws_redshift_cluster.default", &v), + resource.TestCheckResourceAttr( + "aws_redshift_cluster.default", "enable_logging", "false"), + ), + }, + }, + }) +} + func TestAccAWSRedshiftCluster_iamRoles(t *testing.T) { var v redshift.Cluster @@ -399,6 +434,64 @@ resource "aws_redshift_cluster" "default" { allow_version_upgrade = false }` +var testAccAWSRedshiftClusterConfig_loggingDisabled = ` +resource "aws_redshift_cluster" "default" { + cluster_identifier = "tf-redshift-cluster-%d" + availability_zone = "us-west-2a" + database_name = "mydb" + master_username = "foo_test" + master_password = "Mustbe8characters" + node_type = "dc1.large" + automated_snapshot_retention_period = 0 + allow_version_upgrade = false + enable_logging = false +} +` + +var testAccAWSRedshiftClusterConfig_loggingEnabled = ` +resource "aws_s3_bucket" "bucket" { + bucket = "tf-redshift-logging-test-bucket" + force_destroy = true + policy = < Date: Wed, 27 Jul 2016 22:17:35 +0100 Subject: [PATCH 0445/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 888c6ff57..0486bdd3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -139,6 +139,7 @@ IMPROVEMENTS: * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] * provider/aws: expose network interface id in `aws_instance` [GH-6751] * provider/aws: Adding passthrough behavior for 
API Gateway integration [GH-7801] + * provider/aws: Enable Redshift Cluster Logging [GH-7813] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From 390e1acb3ceeae87bc2131ed335ec5e0b318cffd Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:39:03 +0100 Subject: [PATCH 0446/1238] provider/azurerm: Change of `availability_set_id` on (#7650) `azurerm_virtual_machine` should ForceNew Fixes #6873 ``` make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMVirtualMachine_ChangeAvailbilitySet' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMVirtualMachine_ChangeAvailbilitySet -timeout 120m === RUN TestAccAzureRMVirtualMachine_ChangeAvailbilitySet --- PASS: TestAccAzureRMVirtualMachine_ChangeAvailbilitySet (976.35s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 976.367s ``` --- .../azurerm/resource_arm_virtual_machine.go | 1 + .../resource_arm_virtual_machine_test.go | 212 ++++++++++++++++++ 2 files changed, 213 insertions(+) diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine.go b/builtin/providers/azurerm/resource_arm_virtual_machine.go index dfd9f6d90..b58e5878b 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine.go @@ -70,6 +70,7 @@ func resourceArmVirtualMachine() *schema.Resource { Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, StateFunc: func(id interface{}) string { return strings.ToLower(id.(string)) }, diff --git a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go index 91418877c..ee823a80f 100644 --- a/builtin/providers/azurerm/resource_arm_virtual_machine_test.go +++ b/builtin/providers/azurerm/resource_arm_virtual_machine_test.go @@ -262,6 +262,36 @@ func TestAccAzureRMVirtualMachine_ChangeComputerName(t *testing.T) { }) } +func TestAccAzureRMVirtualMachine_ChangeAvailbilitySet(t *testing.T) { + var afterCreate, afterUpdate compute.VirtualMachine + + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_withAvailabilitySet, ri, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualMachine_updateAvailabilitySet, ri, ri, ri, ri, ri, ri, ri, ri) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &afterCreate), + ), + }, + + resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualMachineExists("azurerm_virtual_machine.test", &afterUpdate), + testAccCheckVirtualMachineRecreated( + t, &afterCreate, &afterUpdate), + ), + }, + }, + }) +} + func testCheckAzureRMVirtualMachineExists(name string, vm *compute.VirtualMachine) resource.TestCheckFunc { return func(s *terraform.State) error { // Ensure we have enough information in state to look up in API @@ -1212,6 +1242,188 @@ resource 
"azurerm_virtual_machine" "test" { } ` +var testAccAzureRMVirtualMachine_withAvailabilitySet = ` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" + } + + resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + } + + resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" + } + + resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } + } + + resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } + } + + resource "azurerm_availability_set" "test" { + name = "availabilityset%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" +} + + resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" + } + + resource "azurerm_virtual_machine" "test" { + name = "acctvm-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_A0" + availability_set_id = "${azurerm_availability_set.test.id}" + delete_os_disk_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" 
+ } + + os_profile_linux_config { + disable_password_authentication = false + } + } +` + +var testAccAzureRMVirtualMachine_updateAvailabilitySet = ` + resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" + } + + resource "azurerm_virtual_network" "test" { + name = "acctvn-%d" + address_space = ["10.0.0.0/16"] + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + } + + resource "azurerm_subnet" "test" { + name = "acctsub-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test.name}" + address_prefix = "10.0.2.0/24" + } + + resource "azurerm_network_interface" "test" { + name = "acctni-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + + ip_configuration { + name = "testconfiguration1" + subnet_id = "${azurerm_subnet.test.id}" + private_ip_address_allocation = "dynamic" + } + } + + resource "azurerm_storage_account" "test" { + name = "accsa%d" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } + } + + resource "azurerm_availability_set" "test" { + name = "updatedAvailabilitySet%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" +} + + resource "azurerm_storage_container" "test" { + name = "vhds" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" + container_access_type = "private" + } + + resource "azurerm_virtual_machine" "test" { + name = "acctvm-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + network_interface_ids = ["${azurerm_network_interface.test.id}"] + vm_size = "Standard_A0" + availability_set_id = "${azurerm_availability_set.test.id}" + delete_os_disk_on_termination = true + + storage_image_reference { + publisher = "Canonical" + offer = "UbuntuServer" + sku = "14.04.2-LTS" + version = "latest" + } + + storage_os_disk { + name = "myosdisk1" + vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd" + caching = "ReadWrite" + create_option = "FromImage" + } + + os_profile { + computer_name = "hostname%d" + admin_username = "testadmin" + admin_password = "Password1234!" 
+ } + + os_profile_linux_config { + disable_password_authentication = false + } + } +` + var testAccAzureRMVirtualMachine_updateMachineName = ` resource "azurerm_resource_group" "test" { name = "acctestrg-%d" From ba8674451c02491eb422ba85e526722a11ffe316 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:39:38 +0100 Subject: [PATCH 0447/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0486bdd3b..10b3d1080 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -257,6 +257,7 @@ BUG FIXES: * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource [GH-7646] * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names [GH-7674] * provider/azurerm: `azurerm_virtual_machine` computer_name now Required [GH-7308] + * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew [GH-7650] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/cloudstack: Fix refresing `cloudstack_network_acl_rule` when the associated ACL is deleted [GH-7612] From 61c5c9f56b471b5fe1970a61bc685953dc68079b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:49:43 +0100 Subject: [PATCH 0448/1238] provider/azurerm: `azurerm_storage_table` resource (#7327) * provider/azurerm: `azurerm_storage_table` resource Fixes #7257 `````` * Update resource_arm_storage_table.go * Update resource_arm_storage_table.go --- builtin/providers/azurerm/config.go | 17 ++ builtin/providers/azurerm/provider.go | 1 + .../azurerm/resource_arm_storage_table.go | 146 +++++++++++ .../resource_arm_storage_table_test.go | 237 ++++++++++++++++++ .../azurerm/r/storage_table.html.markdown | 51 ++++ website/source/layouts/azurerm.erb | 4 + 6 files changed, 456 insertions(+) create mode 100644 builtin/providers/azurerm/resource_arm_storage_table.go create mode 100644 builtin/providers/azurerm/resource_arm_storage_table_test.go create mode 100644 website/source/docs/providers/azurerm/r/storage_table.html.markdown diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index e44a9222a..97387629d 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -364,6 +364,23 @@ func (armClient *ArmClient) getBlobStorageClientForStorageAccount(resourceGroupN blobClient := storageClient.GetBlobService() return &blobClient, true, nil } +func (armClient *ArmClient) getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.TableServiceClient, bool, error) { + key, accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) + if err != nil { + return nil, accountExists, err + } + if accountExists == false { + return nil, false, nil + } + + storageClient, err := mainStorage.NewBasicClient(storageAccountName, key) + if err != nil { + return nil, true, fmt.Errorf("Error creating storage client for storage account %q: %s", storageAccountName, err) + } + + tableClient := storageClient.GetTableService() + return &tableClient, true, nil +} func (armClient *ArmClient) getQueueServiceClientForStorageAccount(resourceGroupName, storageAccountName string) (*mainStorage.QueueServiceClient, bool, error) { key, accountExists, err := armClient.getKeyForStorageAccount(resourceGroupName, storageAccountName) 
diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go index 0dfdc07f7..11f9576e8 100644 --- a/builtin/providers/azurerm/provider.go +++ b/builtin/providers/azurerm/provider.go @@ -60,6 +60,7 @@ func Provider() terraform.ResourceProvider { "azurerm_storage_blob": resourceArmStorageBlob(), "azurerm_storage_container": resourceArmStorageContainer(), "azurerm_storage_queue": resourceArmStorageQueue(), + "azurerm_storage_table": resourceArmStorageTable(), "azurerm_subnet": resourceArmSubnet(), "azurerm_template_deployment": resourceArmTemplateDeployment(), "azurerm_virtual_machine": resourceArmVirtualMachine(), diff --git a/builtin/providers/azurerm/resource_arm_storage_table.go b/builtin/providers/azurerm/resource_arm_storage_table.go new file mode 100644 index 000000000..9c440fa92 --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_storage_table.go @@ -0,0 +1,146 @@ +package azurerm + +import ( + "fmt" + "log" + "regexp" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceArmStorageTable() *schema.Resource { + return &schema.Resource{ + Create: resourceArmStorageTableCreate, + Read: resourceArmStorageTableRead, + Delete: resourceArmStorageTableDelete, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateArmStorageTableName, + }, + "resource_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "storage_account_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func validateArmStorageTableName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value == "table" { + errors = append(errors, fmt.Errorf( + "Table Storage %q cannot use the word `table`: %q", + k, value)) + } + if !regexp.MustCompile(`^[A-Za-z][A-Za-z0-9]{6,63}$`).MatchString(value) { + errors = append(errors, fmt.Errorf( + "Table Storage %q cannot begin with a numeric character, only alphanumeric characters are allowed and must be between 6 and 63 characters long: %q", + k, value)) + } + + return +} + +func resourceArmStorageTableCreate(d *schema.ResourceData, meta interface{}) error { + armClient := meta.(*ArmClient) + + resourceGroupName := d.Get("resource_group_name").(string) + storageAccountName := d.Get("storage_account_name").(string) + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) + if err != nil { + return err + } + if !accountExists { + return fmt.Errorf("Storage Account %q Not Found", storageAccountName) + } + + name := d.Get("name").(string) + table := storage.AzureTable(name) + + log.Printf("[INFO] Creating table %q in storage account %q.", name, storageAccountName) + err = tableClient.CreateTable(table) + if err != nil { + return fmt.Errorf("Error creating table %q in storage account %q: %s", name, storageAccountName, err) + } + + d.SetId(name) + + return resourceArmStorageTableRead(d, meta) +} + +func resourceArmStorageTableRead(d *schema.ResourceData, meta interface{}) error { + armClient := meta.(*ArmClient) + + resourceGroupName := d.Get("resource_group_name").(string) + storageAccountName := d.Get("storage_account_name").(string) + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) + if err != nil { + return err + } + if !accountExists { + log.Printf("[DEBUG] Storage 
account %q not found, removing table %q from state", storageAccountName, d.Id()) + d.SetId("") + return nil + } + + name := d.Get("name").(string) + tables, err := tableClient.QueryTables() + if err != nil { + return fmt.Errorf("Failed to retrieve storage tables in account %q: %s", name, err) + } + + var found bool + for _, table := range tables { + if string(table) == name { + found = true + d.Set("name", string(table)) + } + } + + if !found { + log.Printf("[INFO] Storage table %q does not exist in account %q, removing from state...", name, storageAccountName) + d.SetId("") + } + + return nil +} + +func resourceArmStorageTableDelete(d *schema.ResourceData, meta interface{}) error { + armClient := meta.(*ArmClient) + + resourceGroupName := d.Get("resource_group_name").(string) + storageAccountName := d.Get("storage_account_name").(string) + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroupName, storageAccountName) + if err != nil { + return err + } + if !accountExists { + log.Printf("[INFO]Storage Account %q doesn't exist so the table won't exist", storageAccountName) + return nil + } + + name := d.Get("name").(string) + table := storage.AzureTable(name) + + log.Printf("[INFO] Deleting storage table %q in account %q", name, storageAccountName) + if err := tableClient.DeleteTable(table); err != nil { + return fmt.Errorf("Error deleting storage table %q from storage account %q: %s", name, storageAccountName, err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/azurerm/resource_arm_storage_table_test.go b/builtin/providers/azurerm/resource_arm_storage_table_test.go new file mode 100644 index 000000000..ca184d72d --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_storage_table_test.go @@ -0,0 +1,237 @@ +package azurerm + +import ( + "fmt" + "log" + "strings" + "testing" + + "github.com/Azure/azure-sdk-for-go/storage" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAzureRMStorageTable_basic(t *testing.T) { + var table storage.AzureTable + + ri := acctest.RandInt() + rs := strings.ToLower(acctest.RandString(11)) + config := fmt.Sprintf(testAccAzureRMStorageTable_basic, ri, rs, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageTableDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageTableExists("azurerm_storage_table.test", &table), + ), + }, + }, + }) +} + +func TestAccAzureRMStorageTable_disappears(t *testing.T) { + var table storage.AzureTable + + ri := acctest.RandInt() + rs := strings.ToLower(acctest.RandString(11)) + config := fmt.Sprintf(testAccAzureRMStorageTable_basic, ri, rs, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageTableDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageTableExists("azurerm_storage_table.test", &table), + testAccARMStorageTableDisappears("azurerm_storage_table.test", &table), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testCheckAzureRMStorageTableExists(name string, t *storage.AzureTable) resource.TestCheckFunc { + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[name] + 
if !ok { + return fmt.Errorf("Not found: %s", name) + } + + name := rs.Primary.Attributes["name"] + storageAccountName := rs.Primary.Attributes["storage_account_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for storage table: %s", name) + } + + armClient := testAccProvider.Meta().(*ArmClient) + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + return err + } + if !accountExists { + return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) + } + + tables, err := tableClient.QueryTables() + + if len(tables) == 0 { + return fmt.Errorf("Bad: Storage Table %q (storage account: %q) does not exist", name, storageAccountName) + } + + var found bool + for _, table := range tables { + if string(table) == name { + found = true + *t = table + } + } + + if !found { + return fmt.Errorf("Bad: Storage Table %q (storage account: %q) does not exist", name, storageAccountName) + } + + return nil + } +} + +func testAccARMStorageTableDisappears(name string, t *storage.AzureTable) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + armClient := testAccProvider.Meta().(*ArmClient) + + storageAccountName := rs.Primary.Attributes["storage_account_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for storage table: %s", string(*t)) + } + + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + return err + } + if !accountExists { + log.Printf("[INFO]Storage Account %q doesn't exist so the table won't exist", storageAccountName) + return nil + } + + table := storage.AzureTable(string(*t)) + err = tableClient.DeleteTable(table) + if err != nil { + return err + } + + return nil + } +} + +func testCheckAzureRMStorageTableDestroy(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_storage_table" { + continue + } + + name := rs.Primary.Attributes["name"] + storageAccountName := rs.Primary.Attributes["storage_account_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for storage table: %s", name) + } + + armClient := testAccProvider.Meta().(*ArmClient) + tableClient, accountExists, err := armClient.getTableServiceClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + //If we can't get keys then the table can't exist + return nil + } + if !accountExists { + return nil + } + + tables, err := tableClient.QueryTables() + + if err != nil { + return nil + } + + var found bool + for _, table := range tables { + if string(table) == name { + found = true + } + } + + if found { + return fmt.Errorf("Bad: Storage Table %q (storage account: %q) still exist", name, storageAccountName) + } + } + + return nil +} + +func TestValidateArmStorageTableName(t *testing.T) { + validNames := []string{ + "mytable01", + "mytable", + "myTable", + "MYTABLE", + } + for _, v := range validNames { + _, errors := validateArmStorageTableName(v, "name") + if len(errors) != 0 { + t.Fatalf("%q should be a valid 
Storage Table Name: %q", v, errors) + } + } + + invalidNames := []string{ + "table", + "-invalidname1", + "invalid_name", + "invalid!", + "ww", + strings.Repeat("w", 65), + } + for _, v := range invalidNames { + _, errors := validateArmStorageTableName(v, "name") + if len(errors) == 0 { + t.Fatalf("%q should be an invalid Storage Table Name", v) + } + } +} + +var testAccAzureRMStorageTable_basic = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "westus" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_table" "test" { + name = "tfacceptancetest%d" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" +} +` diff --git a/website/source/docs/providers/azurerm/r/storage_table.html.markdown b/website/source/docs/providers/azurerm/r/storage_table.html.markdown new file mode 100644 index 000000000..4f7ac4dc3 --- /dev/null +++ b/website/source/docs/providers/azurerm/r/storage_table.html.markdown @@ -0,0 +1,51 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_storage_table" +sidebar_current: "docs-azurerm-resource-storage-table" +description: |- + Create a Azure Storage Table. +--- + +# azurerm\_storage\_table + +Create an Azure Storage Table. + +## Example Usage + +``` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "westus" +} + +resource "azurerm_storage_account" "test" { + name = "acctestacc%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" +} + +resource "azurerm_storage_table" "test" { + name = "mysampletable" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.test.name}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the storage table. Must be unique within the storage account the table is located. + +* `resource_group_name` - (Required) The name of the resource group in which to + create the storage table. Changing this forces a new resource to be created. + +* `storage_account_name` - (Required) Specifies the storage account in which to create the storage table. + Changing this forces a new resource to be created. + +## Attributes Reference + +The following attributes are exported in addition to the arguments listed above: + +* `id` - The storage table Resource ID. 
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb index 089fb3dd1..d2ace25b8 100644 --- a/website/source/layouts/azurerm.erb +++ b/website/source/layouts/azurerm.erb @@ -165,6 +165,10 @@ azurerm_storage_queue + > + azurerm_storage_table + + From c0bd7fe1d8a062e143c2d2c3da0224b022861fe1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:50:44 +0100 Subject: [PATCH 0449/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10b3d1080..32019d290 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -89,6 +89,7 @@ FEATURES: * **New Resource:** `consul_service` [GH-7508] * **New Resource:** `mysql_grant` [GH-7656] * **New Resource:** `mysql_user` [GH-7656] + * **New Resource:** `azurerm_storage_table` [GH-7327] * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] * core: The `lookup` interpolation function can now have a default fall-back value specified [GH-6884] * core: The `terraform plan` command no longer persists state. [GH-6811] From 630de403cec7064cf0ed3cd8288bf57bc9ba6bce Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:57:02 +0100 Subject: [PATCH 0450/1238] provider/azurerm: Wait for `azurerm_storage_account` to be available (#7329) Fixes #7005 where a container tried to provision *before* the storage account was available. We now wait for the Storage Account to be in the `Succeeded` state before returning ``` make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMStorageAccount_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMStorageAccount_ -timeout 120m === RUN TestAccAzureRMStorageAccount_basic --- PASS: TestAccAzureRMStorageAccount_basic (163.68s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 163.695s ``` --- .../azurerm/resource_arm_storage_account.go | 33 +++++++++++++++++-- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_storage_account.go b/builtin/providers/azurerm/resource_arm_storage_account.go index 2d01952cf..059e92ecb 100644 --- a/builtin/providers/azurerm/resource_arm_storage_account.go +++ b/builtin/providers/azurerm/resource_arm_storage_account.go @@ -2,11 +2,14 @@ package azurerm import ( "fmt" + "log" "net/http" "regexp" "strings" + "time" "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) @@ -109,7 +112,8 @@ func resourceArmStorageAccount() *schema.Resource { } func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*ArmClient).storageServiceClient + client := meta.(*ArmClient) + storageClient := client.storageServiceClient resourceGroupName := d.Get("resource_group_name").(string) storageAccountName := d.Get("name").(string) @@ -127,13 +131,13 @@ func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) e Tags: expandTags(tags), } - _, err := client.Create(resourceGroupName, storageAccountName, opts, make(chan struct{})) + _, err := storageClient.Create(resourceGroupName, storageAccountName, opts, make(chan struct{})) if err != nil { return fmt.Errorf("Error creating Azure Storage Account '%s': %s", storageAccountName, err) } // The only way to get the ID back apparently is to read the resource again - read, err 
:= client.GetProperties(resourceGroupName, storageAccountName) + read, err := storageClient.GetProperties(resourceGroupName, storageAccountName) if err != nil { return err } @@ -142,6 +146,18 @@ func resourceArmStorageAccountCreate(d *schema.ResourceData, meta interface{}) e storageAccountName, resourceGroupName) } + log.Printf("[DEBUG] Waiting for Storage Account (%s) to become available", storageAccountName) + stateConf := &resource.StateChangeConf{ + Pending: []string{"Updating", "Creating"}, + Target: []string{"Succeeded"}, + Refresh: storageAccountStateRefreshFunc(client, resourceGroupName, storageAccountName), + Timeout: 30 * time.Minute, + MinTimeout: 15 * time.Second, + } + if _, err := stateConf.WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Storage Account (%s) to become available: %s", storageAccountName, err) + } + d.SetId(*read.ID) return resourceArmStorageAccountRead(d, meta) @@ -305,3 +321,14 @@ func validateArmStorageAccountType(v interface{}, k string) (ws []string, es []e es = append(es, fmt.Errorf("Invalid storage account type %q", input)) return } + +func storageAccountStateRefreshFunc(client *ArmClient, resourceGroupName string, storageAccountName string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + res, err := client.storageServiceClient.GetProperties(resourceGroupName, storageAccountName) + if err != nil { + return nil, "", fmt.Errorf("Error issuing read request in storageAccountStateRefreshFunc to Azure ARM for Storage Account '%s' (RG: '%s'): %s", storageAccountName, resourceGroupName, err) + } + + return res, string(res.Properties.ProvisioningState), nil + } +} From e1d21594e25a749ea04af5e5263f456c91c89c4f Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 27 Jul 2016 22:57:36 +0100 Subject: [PATCH 0451/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32019d290..716520346 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -259,6 +259,7 @@ BUG FIXES: * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names [GH-7674] * provider/azurerm: `azurerm_virtual_machine` computer_name now Required [GH-7308] * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew [GH-7650] + * provider/azurerm: Wait for `azurerm_storage_account` to be available [GH-7329] * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] * provider/cloudstack: Fix refresing `cloudstack_network_acl_rule` when the associated ACL is deleted [GH-7612] From 7af10adcbe484f176ea479c86691e7a2801b891e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 27 Jul 2016 17:14:47 -0500 Subject: [PATCH 0452/1238] core: Do not assume HCL parser has touched vars This PR fixes #7824, which crashed when applying a plan file. The bug is that while a map which has come from the HCL parser reifies as a []map[string]interface{}, the variable saved in the plan file was not. We now cover both cases. Fixes #7824. 
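For illustration, a minimal standalone sketch (hypothetical helper and variable names, not the code in the diff below) of the normalization this change describes: merging a map variable into an existing map whether it arrives as the HCL parser's single-element slice of maps or as a plain map decoded from a plan file.

```go
package main

import "fmt"

// mergeMapVariable merges a variable value into an existing map. The value may
// be either a single-element []map[string]interface{} (the shape produced by
// the HCL parser) or a plain map[string]interface{} (the shape read back from
// a serialized plan file).
func mergeMapVariable(existing map[string]interface{}, v interface{}) error {
	switch typed := v.(type) {
	case []map[string]interface{}:
		if len(typed) == 0 {
			return nil // nothing to merge
		}
		for k, val := range typed[0] {
			existing[k] = val
		}
	case map[string]interface{}:
		for k, val := range typed {
			existing[k] = val
		}
	default:
		return fmt.Errorf("unexpected type %T for a map variable", v)
	}
	return nil
}

func main() {
	existing := map[string]interface{}{"test": "1"}

	// Shape produced by the HCL parser.
	fromParser := []map[string]interface{}{{"region": "us-east-1"}}
	// Shape read back from a plan file.
	fromPlan := map[string]interface{}{"env": "prod"}

	_ = mergeMapVariable(existing, fromParser)
	_ = mergeMapVariable(existing, fromPlan)
	fmt.Println(existing) // all three keys are now present
}
```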
--- terraform/context.go | 22 +++++++--- terraform/context_apply_test.go | 49 ++++++++++++++++++++++ terraform/test-fixtures/issue-7824/main.tf | 6 +++ 3 files changed, 71 insertions(+), 6 deletions(-) create mode 100644 terraform/test-fixtures/issue-7824/main.tf diff --git a/terraform/context.go b/terraform/context.go index 86d7e58ce..f43e48dfe 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -181,11 +181,16 @@ func NewContext(opts *ContextOpts) (*Context, error) { if existingMap, ok := existing.(map[string]interface{}); !ok { panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) } else { - if newMap, ok := varVal.(map[string]interface{}); ok { - for newKey, newVal := range newMap { + switch typedV := varVal.(type) { + case []map[string]interface{}: + for newKey, newVal := range typedV[0] { existingMap[newKey] = newVal } - } else { + case map[string]interface{}: + for newKey, newVal := range typedV { + existingMap[newKey] = newVal + } + default: panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) } } @@ -208,11 +213,16 @@ func NewContext(opts *ContextOpts) (*Context, error) { if existingMap, ok := existing.(map[string]interface{}); !ok { panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) } else { - if newMap, ok := v.([]map[string]interface{}); ok { - for newKey, newVal := range newMap[0] { + switch typedV := v.(type) { + case []map[string]interface{}: + for newKey, newVal := range typedV[0] { existingMap[newKey] = newVal } - } else { + case map[string]interface{}: + for newKey, newVal := range typedV { + existingMap[newKey] = newVal + } + default: panic(fmt.Sprintf("%s is not a map, this is a bug in Terraform.", k)) } } diff --git a/terraform/context_apply_test.go b/terraform/context_apply_test.go index ce64ddfca..6c2a493c5 100644 --- a/terraform/context_apply_test.go +++ b/terraform/context_apply_test.go @@ -4523,6 +4523,55 @@ func TestContext2Apply_singleDestroy(t *testing.T) { } } +// GH-7824 +func TestContext2Apply_issue7824(t *testing.T) { + p := testProvider("template") + p.ResourcesReturn = append(p.ResourcesReturn, ResourceType{ + Name: "template_file", + }) + + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + + // Apply cleanly step 0 + ctx := testContext2(t, &ContextOpts{ + Module: testModule(t, "issue-7824"), + Providers: map[string]ResourceProviderFactory{ + "template": testProviderFuncFixed(p), + }, + }) + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Write / Read plan to simulate running it through a Plan file + var buf bytes.Buffer + if err := WritePlan(plan, &buf); err != nil { + t.Fatalf("err: %s", err) + } + + planFromFile, err := ReadPlan(&buf) + if err != nil { + t.Fatalf("err: %s", err) + } + + ctx, err = planFromFile.Context(&ContextOpts{ + Providers: map[string]ResourceProviderFactory{ + "template": testProviderFuncFixed(p), + }, + }) + if err != nil { + t.Fatalf("err: %s", err) + } + + _, err = ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } +} + // GH-5254 func TestContext2Apply_issue5254(t *testing.T) { // Create a provider. 
We use "template" here just to match the repro diff --git a/terraform/test-fixtures/issue-7824/main.tf b/terraform/test-fixtures/issue-7824/main.tf new file mode 100644 index 000000000..835254b65 --- /dev/null +++ b/terraform/test-fixtures/issue-7824/main.tf @@ -0,0 +1,6 @@ +variable "test" { + type = "map" + default = { + "test" = "1" + } +} \ No newline at end of file From 7ead97369f1f2724b52a18ec2fef66c691baa725 Mon Sep 17 00:00:00 2001 From: tomgoren Date: Wed, 27 Jul 2016 16:26:47 -0700 Subject: [PATCH 0453/1238] Website typo (#7838) * missing single space between the words 'DB' and 'Snapshot' * just kidding - 'S' should be lowercase also --- website/source/docs/providers/aws/r/rds_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index 90e373eac..eef19cff0 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -61,7 +61,7 @@ string. * `final_snapshot_identifier` - (Optional) The name of your final DB snapshot when this DB cluster is deleted. If omitted, no final snapshot will be made. -* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is true. +* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is true. * `availability_zones` - (Optional) A list of EC2 Availability Zones that instances in the DB cluster can be created in * `backup_retention_period` - (Optional) The days to retain backups for. Default From 2195a67748fce27a0e778a2f9f62b50ae1a352f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Borgstrom=20=E2=99=95?= Date: Thu, 28 Jul 2016 00:44:09 -0700 Subject: [PATCH 0454/1238] Ensure kms_key_id docs indicate it is an ARN (#7842) If you specify just a bare ID, then the initial application works but subsequent applications may end up doing bad things, like: ``` -/+ aws_ebs_volume.vol_1 availability_zone: "us-east-1a" => "us-east-1a" encrypted: "true" => "true" iops: "" => "" kms_key_id: "arn:aws:kms:us-east-1:123456789:key/59faf88b-0912-4cca-8b6c-bd107a6ba8c4" => "59faf88b-0912-4cca-8b6c-bd107a6ba8c4" (forces new resource) size: "100" => "100" snapshot_id: "" => "" ``` --- website/source/docs/providers/aws/r/cloudtrail.html.markdown | 4 ++-- website/source/docs/providers/aws/r/ebs_volume.html.md | 4 ++-- .../docs/providers/aws/r/redshift_cluster.html.markdown | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/website/source/docs/providers/aws/r/cloudtrail.html.markdown b/website/source/docs/providers/aws/r/cloudtrail.html.markdown index 3d8d3d290..c31365018 100644 --- a/website/source/docs/providers/aws/r/cloudtrail.html.markdown +++ b/website/source/docs/providers/aws/r/cloudtrail.html.markdown @@ -77,7 +77,7 @@ The following arguments are supported: defined for notification of log file delivery. * `enable_log_file_validation` - (Optional) Specifies whether log file integrity validation is enabled. 
Defaults to `false`. -* `kms_key_id` - (Optional) Specifies the KMS key ID to use to encrypt the logs delivered by CloudTrail. +* `kms_key_id` - (Optional) Specifies the KMS key ARN to use to encrypt the logs delivered by CloudTrail. * `tags` - (Optional) A mapping of tags to assign to the trail ## Attribute Reference @@ -95,4 +95,4 @@ Cloudtrails can be imported using the `name`, e.g. ``` $ terraform import aws_cloudtrail.sample my-sample-trail -``` \ No newline at end of file +``` diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md index 395d56666..a9117b6d9 100644 --- a/website/source/docs/providers/aws/r/ebs_volume.html.md +++ b/website/source/docs/providers/aws/r/ebs_volume.html.md @@ -34,7 +34,7 @@ The following arguments are supported: * `size` - (Optional) The size of the drive in GB. * `snapshot_id` (Optional) A snapshot to base the EBS volume off of. * `type` - (Optional) The type of EBS volume. Can be "standard", "gp2", "io1", or "st1" (Default: "standard"). -* `kms_key_id` - (Optional) The KMS key ID for the volume. +* `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true * `tags` - (Optional) A mapping of tags to assign to the resource. ## Attributes Reference @@ -50,4 +50,4 @@ EBS Volumes can be imported using the `id`, e.g. ``` $ terraform import aws_ebs_volume.data vol-049df61146c4d7901 -``` \ No newline at end of file +``` diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown index 0b7156101..d6a697e7a 100644 --- a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown @@ -52,7 +52,7 @@ string. * `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1. * `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`. * `encrypted` - (Optional) If true , the data in the cluster is encrypted at rest. -* `kms_key_id` - (Optional) The KMS key ID for the cluster. +* `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true * `elastic_ip` - (Optional) The Elastic IP (EIP) address for the cluster. * `skip_final_snapshot` - (Optional) Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is true. * `final_snapshot_identifier` - (Optional) The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, `skip_final_snapshot` must be false. @@ -93,4 +93,4 @@ Redshift Clusters can be imported using the `cluster_identifier`, e.g. ``` $ terraform import aws_redshift_cluster.myprodcluster tf-redshift-cluster-12345 -``` \ No newline at end of file +``` From 63a14be8da391028a5fac34ff85709e6d90d3640 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Thu, 28 Jul 2016 19:19:39 +0900 Subject: [PATCH 0455/1238] Add ability to set Performance Mode in aws_efs_file_system. (#7791) * Add ability to set Performance Mode in aws_efs_file_system. 
The Elastic File System (EFS) allows for setting a Performance Mode during creation, thus enabling anyone to chose performance of the file system according to their particular needs. This commit adds an optional "performance_mode" attribte to the aws_efs_file_system resource so that an appropriate mode can be set as needed. Signed-off-by: Krzysztof Wilczynski * Add test coverage for the ValidateFunc used. Signed-off-by: Krzysztof Wilczynski * Add "creation_token" and deprecate "reference_name". Add the "creation_token" attribute so that the resource follows the API more closely (as per the convention), thus deprecate the "reference_name" attribute. Update tests and documentation accordingly. Signed-off-by: Krzysztof Wilczynski --- .../aws/import_aws_efs_file_system_test.go | 2 +- .../aws/resource_aws_efs_file_system.go | 95 ++++++++--- .../aws/resource_aws_efs_file_system_test.go | 160 +++++++++++++++++- .../aws/r/efs_file_system.html.markdown | 26 ++- .../aws/r/efs_mount_target.html.markdown | 29 ++-- 5 files changed, 261 insertions(+), 51 deletions(-) diff --git a/builtin/providers/aws/import_aws_efs_file_system_test.go b/builtin/providers/aws/import_aws_efs_file_system_test.go index b40e7f496..46e0de459 100644 --- a/builtin/providers/aws/import_aws_efs_file_system_test.go +++ b/builtin/providers/aws/import_aws_efs_file_system_test.go @@ -22,7 +22,7 @@ func TestAccAWSEFSFileSystem_importBasic(t *testing.T) { ResourceName: resourceName, ImportState: true, ImportStateVerify: true, - ImportStateVerifyIgnore: []string{"reference_name"}, + ImportStateVerifyIgnore: []string{"reference_name", "creation_token"}, }, }, }) diff --git a/builtin/providers/aws/resource_aws_efs_file_system.go b/builtin/providers/aws/resource_aws_efs_file_system.go index 28f451756..d0524aef3 100644 --- a/builtin/providers/aws/resource_aws_efs_file_system.go +++ b/builtin/providers/aws/resource_aws_efs_file_system.go @@ -24,10 +24,27 @@ func resourceAwsEfsFileSystem() *schema.Resource { }, Schema: map[string]*schema.Schema{ + "creation_token": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validateMaxLength(64), + }, + "reference_name": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Deprecated: "Please use attribute `creation_token' instead. 
This attribute might be removed in future releases.", + ConflictsWith: []string{"creation_token"}, + ValidateFunc: validateReferenceName, + }, + + "performance_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validatePerformanceModeType, }, "tags": tagsSchema(), @@ -38,20 +55,34 @@ func resourceAwsEfsFileSystem() *schema.Resource { func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).efsconn - referenceName := "" - if v, ok := d.GetOk("reference_name"); ok { - referenceName = v.(string) + "-" - } - token := referenceName + resource.UniqueId() - fs, err := conn.CreateFileSystem(&efs.CreateFileSystemInput{ - CreationToken: aws.String(token), - }) - if err != nil { - return err + creationToken := "" + if v, ok := d.GetOk("creation_token"); ok { + creationToken = v.(string) + } else { + if v, ok := d.GetOk("reference_name"); ok { + creationToken = resource.PrefixedUniqueId(fmt.Sprintf("%s-", v.(string))) + log.Printf("[WARN] Using deprecated `reference_name' attribute.") + } else { + creationToken = resource.UniqueId() + } + } + + createOpts := &efs.CreateFileSystemInput{ + CreationToken: aws.String(creationToken), + } + + if v, ok := d.GetOk("performance_mode"); ok { + createOpts.PerformanceMode = aws.String(v.(string)) + } + + log.Printf("[DEBUG] EFS file system create options: %#v", *createOpts) + fs, err := conn.CreateFileSystem(createOpts) + if err != nil { + return fmt.Errorf("Error creating EFS file system: %s", err) } - log.Printf("[DEBUG] Creating EFS file system: %s", *fs) d.SetId(*fs.FileSystemId) + log.Printf("[INFO] EFS file system ID: %s", d.Id()) stateConf := &resource.StateChangeConf{ Pending: []string{"creating"}, @@ -82,7 +113,7 @@ func resourceAwsEfsFileSystemCreate(d *schema.ResourceData, meta interface{}) er return fmt.Errorf("Error waiting for EFS file system (%q) to create: %q", d.Id(), err.Error()) } - log.Printf("[DEBUG] EFS file system created: %q", *fs.FileSystemId) + log.Printf("[DEBUG] EFS file system %q created.", d.Id()) return resourceAwsEfsFileSystemUpdate(d, meta) } @@ -91,7 +122,8 @@ func resourceAwsEfsFileSystemUpdate(d *schema.ResourceData, meta interface{}) er conn := meta.(*AWSClient).efsconn err := setTagsEFS(conn, d) if err != nil { - return err + return fmt.Errorf("Error setting EC2 tags for EFS file system (%q): %q", + d.Id(), err.Error()) } return resourceAwsEfsFileSystemRead(d, meta) @@ -119,7 +151,8 @@ func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) erro FileSystemId: aws.String(d.Id()), }) if err != nil { - return err + return fmt.Errorf("Error retrieving EC2 tags for EFS file system (%q): %q", + d.Id(), err.Error()) } d.Set("tags", tagsToMapEFS(tagsResp.Tags)) @@ -130,7 +163,7 @@ func resourceAwsEfsFileSystemRead(d *schema.ResourceData, meta interface{}) erro func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).efsconn - log.Printf("[DEBUG] Deleting EFS file system %s", d.Id()) + log.Printf("[DEBUG] Deleting EFS file system: %s", d.Id()) _, err := conn.DeleteFileSystem(&efs.DeleteFileSystemInput{ FileSystemId: aws.String(d.Id()), }) @@ -154,8 +187,7 @@ func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) er } fs := resp.FileSystems[0] - log.Printf("[DEBUG] current status of %q: %q", - *fs.FileSystemId, *fs.LifeCycleState) + log.Printf("[DEBUG] current status of %q: %q", *fs.FileSystemId, *fs.LifeCycleState) return fs, 
*fs.LifeCycleState, nil }, Timeout: 10 * time.Minute, @@ -165,10 +197,31 @@ func resourceAwsEfsFileSystemDelete(d *schema.ResourceData, meta interface{}) er _, err = stateConf.WaitForState() if err != nil { - return err + return fmt.Errorf("Error waiting for EFS file system (%q) to delete: %q", + d.Id(), err.Error()) } log.Printf("[DEBUG] EFS file system %q deleted.", d.Id()) return nil } + +func validateReferenceName(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + creationToken := resource.PrefixedUniqueId(fmt.Sprintf("%s-", value)) + if len(creationToken) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot take the Creation Token over the limit of 64 characters: %q", k, value)) + } + return +} + +func validatePerformanceModeType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "generalPurpose" && value != "maxIO" { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Performance Mode %q. Valid modes are either %q or %q", + k, value, "generalPurpose", "maxIO")) + } + return +} diff --git a/builtin/providers/aws/resource_aws_efs_file_system_test.go b/builtin/providers/aws/resource_aws_efs_file_system_test.go index 2f9ff9a4c..bc22c9807 100644 --- a/builtin/providers/aws/resource_aws_efs_file_system_test.go +++ b/builtin/providers/aws/resource_aws_efs_file_system_test.go @@ -9,10 +9,71 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/efs" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) +func TestResourceAWSEFSReferenceName_validation(t *testing.T) { + var value string + var errors []error + + value = acctest.RandString(128) + _, errors = validateReferenceName(value, "reference_name") + if len(errors) == 0 { + t.Fatalf("Expected to trigger a validation error") + } + + value = acctest.RandString(32) + _, errors = validateReferenceName(value, "reference_name") + if len(errors) != 0 { + t.Fatalf("Expected to trigger a validation error") + } +} + +func TestResourceAWSEFSPerformanceMode_validation(t *testing.T) { + type testCase struct { + Value string + ErrCount int + } + + invalidCases := []testCase{ + { + Value: "garrusVakarian", + ErrCount: 1, + }, + { + Value: acctest.RandString(80), + ErrCount: 1, + }, + } + + for _, tc := range invalidCases { + _, errors := validatePerformanceModeType(tc.Value, "performance_mode") + if len(errors) != tc.ErrCount { + t.Fatalf("Expected to trigger a validation error") + } + } + + validCases := []testCase{ + { + Value: "generalPurpose", + ErrCount: 0, + }, + { + Value: "maxIO", + ErrCount: 0, + }, + } + + for _, tc := range validCases { + _, errors := validatePerformanceModeType(tc.Value, "aws_efs_file_system") + if len(errors) != tc.ErrCount { + t.Fatalf("Expected not to trigger a validation error") + } + } +} + func TestAccAWSEFSFileSystem_basic(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -25,6 +86,10 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) { testAccCheckEfsFileSystem( "aws_efs_file_system.foo", ), + testAccCheckEfsFileSystemPerformanceMode( + "aws_efs_file_system.foo", + "generalPurpose", + ), ), }, resource.TestStep{ @@ -33,6 +98,10 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) { testAccCheckEfsFileSystem( "aws_efs_file_system.foo-with-tags", ), + testAccCheckEfsFileSystemPerformanceMode( + "aws_efs_file_system.foo-with-tags", + "generalPurpose", + ), 
testAccCheckEfsFileSystemTags( "aws_efs_file_system.foo-with-tags", map[string]string{ @@ -42,6 +111,22 @@ func TestAccAWSEFSFileSystem_basic(t *testing.T) { ), ), }, + resource.TestStep{ + Config: testAccAWSEFSFileSystemConfigWithPerformanceMode, + Check: resource.ComposeTestCheckFunc( + testAccCheckEfsFileSystem( + "aws_efs_file_system.foo-with-performance-mode", + ), + testAccCheckEfsCreationToken( + "aws_efs_file_system.foo-with-performance-mode", + "supercalifragilisticexpialidocious", + ), + testAccCheckEfsFileSystemPerformanceMode( + "aws_efs_file_system.foo-with-performance-mode", + "maxIO", + ), + ), + }, }, }) } @@ -82,16 +167,41 @@ func testAccCheckEfsFileSystem(resourceID string) resource.TestCheckFunc { return fmt.Errorf("No ID is set") } - fs, ok := s.RootModule().Resources[resourceID] + conn := testAccProvider.Meta().(*AWSClient).efsconn + _, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ + FileSystemId: aws.String(rs.Primary.ID), + }) + + if err != nil { + return err + } + + return nil + } +} + +func testAccCheckEfsCreationToken(resourceID string, expectedToken string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceID] if !ok { return fmt.Errorf("Not found: %s", resourceID) } + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + conn := testAccProvider.Meta().(*AWSClient).efsconn - _, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ - FileSystemId: aws.String(fs.Primary.ID), + resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ + FileSystemId: aws.String(rs.Primary.ID), }) + fs := resp.FileSystems[0] + if *fs.CreationToken != expectedToken { + return fmt.Errorf("Creation Token mismatch.\nExpected: %s\nGiven: %v", + expectedToken, *fs.CreationToken) + } + if err != nil { return err } @@ -111,14 +221,9 @@ func testAccCheckEfsFileSystemTags(resourceID string, expectedTags map[string]st return fmt.Errorf("No ID is set") } - fs, ok := s.RootModule().Resources[resourceID] - if !ok { - return fmt.Errorf("Not found: %s", resourceID) - } - conn := testAccProvider.Meta().(*AWSClient).efsconn resp, err := conn.DescribeTags(&efs.DescribeTagsInput{ - FileSystemId: aws.String(fs.Primary.ID), + FileSystemId: aws.String(rs.Primary.ID), }) if !reflect.DeepEqual(expectedTags, tagsToMapEFS(resp.Tags)) { @@ -134,6 +239,36 @@ func testAccCheckEfsFileSystemTags(resourceID string, expectedTags map[string]st } } +func testAccCheckEfsFileSystemPerformanceMode(resourceID string, expectedMode string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[resourceID] + if !ok { + return fmt.Errorf("Not found: %s", resourceID) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + conn := testAccProvider.Meta().(*AWSClient).efsconn + resp, err := conn.DescribeFileSystems(&efs.DescribeFileSystemsInput{ + FileSystemId: aws.String(rs.Primary.ID), + }) + + fs := resp.FileSystems[0] + if *fs.PerformanceMode != expectedMode { + return fmt.Errorf("Performance Mode mismatch.\nExpected: %s\nGiven: %v", + expectedMode, *fs.PerformanceMode) + } + + if err != nil { + return err + } + + return nil + } +} + const testAccAWSEFSFileSystemConfig = ` resource "aws_efs_file_system" "foo" { reference_name = "radeksimko" @@ -149,3 +284,10 @@ resource "aws_efs_file_system" "foo-with-tags" { } } ` + +const testAccAWSEFSFileSystemConfigWithPerformanceMode = ` +resource "aws_efs_file_system" "foo-with-performance-mode" { + 
creation_token = "supercalifragilisticexpialidocious" + performance_mode = "maxIO" +} +` diff --git a/website/source/docs/providers/aws/r/efs_file_system.html.markdown b/website/source/docs/providers/aws/r/efs_file_system.html.markdown index ac017144f..060f8eec8 100644 --- a/website/source/docs/providers/aws/r/efs_file_system.html.markdown +++ b/website/source/docs/providers/aws/r/efs_file_system.html.markdown @@ -3,12 +3,12 @@ layout: "aws" page_title: "AWS: aws_efs_file_system" sidebar_current: "docs-aws-resource-efs-file-system" description: |- - Provides an EFS file system. + Provides an Elastic File System (EFS) resource. --- # aws\_efs\_file\_system -Provides an EFS file system. +Provides an Elastic File System (EFS) resource. ## Example Usage @@ -23,22 +23,32 @@ resource "aws_efs_file_system" "foo" { ## Argument Reference +~> **NOTE:** The `reference_name` attribute has been deprecated and might +be removed in future releases, please use `creation_token` instead. + The following arguments are supported: -* `reference_name` - (Optional) A reference name used in Creation Token -* `tags` - (Optional) A mapping of tags to assign to the file system +* `creation_token` - (Optional) A unique name (a maximum of 64 characters are allowed) +used as reference when creating the the Elastic File System to ensure idempotent file +system creation. By default generated by Terraform. See [Elastic File System] +(http://docs.aws.amazon.com/efs/latest/ug/) user guide for more information. +* `reference_name` - **DEPRECATED** (Optional) A reference name used when creating the +`Creation Token` which Amazon EFS uses to ensure idempotent file system creation. By +default generated by Terraform. +* `performance_mode` - (Optional) The file system performance mode. Can be either +`"generalPurpose"` or `"maxIO"` (Default: `"generalPurpose"`). +* `tags` - (Optional) A mapping of tags to assign to the file system. ## Attributes Reference The following attributes are exported: -* `id` - The ID that identifies the file system - +* `id` - The ID that identifies the file system (e.g. fs-ccfc0d65). ## Import -EFS Filesystems can be imported using the `id`, e.g. +The EFS file systems can be imported using the `id`, e.g. ``` $ terraform import aws_efs_file_system.foo fs-6fa144c6 -``` \ No newline at end of file +``` diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown index 3764926df..b80216c0f 100644 --- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown +++ b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown @@ -3,13 +3,15 @@ layout: "aws" page_title: "AWS: aws_efs_mount_target" sidebar_current: "docs-aws-resource-efs-mount-target" description: |- - Provides an EFS mount target. + Provides an Elastic File System (EFS) mount target. --- # aws\_efs\_mount\_target -Provides an EFS mount target. Per [documentation](https://docs.aws.amazon.com/efs/latest/ug/limits.html) -the limit is 1 mount target per AZ for a single EFS file system. +Provides an Elastic File System (EFS) mount target. + +~> **NOTE:** As per the current [documentation](https://docs.aws.amazon.com/efs/latest/ug/limits.html) +the limit is 1 mount target per Availability Zone for a single EFS file system. ## Example Usage @@ -35,25 +37,28 @@ resource "aws_subnet" "alpha" { The following arguments are supported: * `file_system_id` - (Required) The ID of the file system for which the mount target is intended. 
-* `subnet_id` - (Required) The ID of the subnet that the mount target is in. -* `ip_address` - (Optional) The address at which the file system may be mounted via the mount target. -* `security_groups` - (Optional) A list of up to 5 VPC security group IDs in effect for the mount target. +* `subnet_id` - (Required) The ID of the subnet to add the mount target in. +* `ip_address` - (Optional) The address (within the address range of the specified subnet) at +which the file system may be mounted via the mount target. +* `security_groups` - (Optional) A list of up to 5 VPC security group IDs (that must +be for the same VPC as subnet specified) in effect for the mount target. ## Attributes Reference -~> **Note:** The `dns_name` attribute is only useful if the mount target is in a VPC with `enable_dns_hostnames = true`. +~> **Note:** The `dns_name` attribute is only useful if the mount target is in a VPC that has +support for DNS hostnames enabled. See [Using DNS with Your VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-dns.html) +and [VPC resource](https://www.terraform.io/docs/providers/aws/r/vpc.html#enable_dns_hostnames) in Terraform for more information. The following attributes are exported: -* `id` - The ID of the mount target -* `dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html) +* `id` - The ID of the mount target. +* `dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html). * `network_interface_id` - The ID of the network interface that Amazon EFS created when it created the mount target. - ## Import -EFS Mount Targets can be imported using the `id`, e.g. +The EFS mount targets can be imported using the `id`, e.g. ``` $ terraform import aws_efs_mount_target.alpha fsmt-52a643fb -``` \ No newline at end of file +``` From bf2e8e23d9686819bac0ea1a3b56bea0e5b5f4cd Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 28 Jul 2016 11:20:32 +0100 Subject: [PATCH 0456/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 716520346..c7ccaf70e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -141,6 +141,7 @@ IMPROVEMENTS: * provider/aws: expose network interface id in `aws_instance` [GH-6751] * provider/aws: Adding passthrough behavior for API Gateway integration [GH-7801] * provider/aws: Enable Redshift Cluster Logging [GH-7813] + * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` [GH-7791] * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] From c3ec9c79487c19d12410586669f5428fb259e108 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 28 Jul 2016 13:32:40 +0100 Subject: [PATCH 0457/1238] provider/azurerm: `azurerm_storage_blob` validation fix (#7328) The validation for the `azurerm_storage_blob` `type` parameter was checking for `blob` where it should have been `block` This commits fixes it up ``` make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestResourceAzureRMStorageBlobType_validation' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /vendor/) TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestResourceAzureRMStorageBlobType_validation -timeout 120m === RUN TestResourceAzureRMStorageBlobType_validation --- PASS: TestResourceAzureRMStorageBlobType_validation (0.00s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 0.014s ``` --- builtin/providers/azurerm/resource_arm_storage_blob.go | 6 +++--- builtin/providers/azurerm/resource_arm_storage_blob_test.go | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_storage_blob.go b/builtin/providers/azurerm/resource_arm_storage_blob.go index 83f3c8382..80a3aed92 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob.go @@ -70,12 +70,12 @@ func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors [] func validateArmStorageBlobType(v interface{}, k string) (ws []string, errors []error) { value := strings.ToLower(v.(string)) validTypes := map[string]struct{}{ - "blob": struct{}{}, - "page": struct{}{}, + "block": struct{}{}, + "page": struct{}{}, } if _, ok := validTypes[value]; !ok { - errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "blob", "page")) + errors = append(errors, fmt.Errorf("Blob type %q is invalid, must be %q or %q", value, "block", "page")) } return } diff --git a/builtin/providers/azurerm/resource_arm_storage_blob_test.go b/builtin/providers/azurerm/resource_arm_storage_blob_test.go index bbea47d7a..d4fc5dc74 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob_test.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob_test.go @@ -25,15 +25,15 @@ func TestResourceAzureRMStorageBlobType_validation(t *testing.T) { ErrCount: 0, }, { - Value: "blob", + Value: "block", ErrCount: 0, }, { - Value: "BLOB", + Value: "BLOCK", ErrCount: 0, }, { - Value: "Blob", + Value: "Block", ErrCount: 0, }, } From 341abd7956ac9a672ca39fd0f7c919b7ab5938ab Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 27 Jul 2016 21:34:30 -0400 Subject: [PATCH 0458/1238] limit input retries Prevent going into a busy loop if the input fd closes early. --- terraform/context.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/terraform/context.go b/terraform/context.go index 86d7e58ce..e7a5ef0c6 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -332,6 +332,7 @@ func (c *Context) Input(mode InputMode) error { // Ask the user for a value for this variable var value string + retry := 0 for { var err error value, err = c.uiInput.Input(&InputOpts{ @@ -345,7 +346,12 @@ func (c *Context) Input(mode InputMode) error { } if value == "" && v.Required() { - // Redo if it is required. 
+ // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + return fmt.Errorf("missing required value for %q", n) + } + retry++ continue } From 88030764ff48410e4ce405c100f1ca22c6926762 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Wed, 27 Jul 2016 12:47:44 -0500 Subject: [PATCH 0459/1238] config: Audit all interpolation functions for list/map behavior - `distinct()` - error on non-flat lists - `element()` - error on non-flat lists - `join()` - error on non-flat lists - `length()` - support maps - `lookup()` - error on non-flat maps - `values()` - error on non-flat maps --- config/interpolate_funcs.go | 58 ++++++++----- config/interpolate_funcs_test.go | 83 +++++++++++++++++-- .../docs/configuration/interpolation.html.md | 36 ++++---- 3 files changed, 136 insertions(+), 41 deletions(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 45f8e8915..87e9de778 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -180,13 +180,17 @@ func interpolationFuncCompact() ast.Function { var outputList []string for _, val := range inputList { - if strVal, ok := val.Value.(string); ok { - if strVal == "" { - continue - } - - outputList = append(outputList, strVal) + strVal, ok := val.Value.(string) + if !ok { + return nil, fmt.Errorf( + "compact() may only be used with flat lists, this list contains elements of %s", + val.Type.Printable()) } + if strVal == "" { + continue + } + + outputList = append(outputList, strVal) } return stringSliceToVariableValue(outputList), nil }, @@ -487,11 +491,16 @@ func interpolationFuncDistinct() ast.Function { var list []string if len(args) != 1 { - return nil, fmt.Errorf("distinct() excepts only one argument.") + return nil, fmt.Errorf("accepts only one argument.") } if argument, ok := args[0].([]ast.Variable); ok { for _, element := range argument { + if element.Type != ast.TypeString { + return nil, fmt.Errorf( + "only works for flat lists, this list contains elements of %s", + element.Type.Printable()) + } list = appendIfMissing(list, element.Value.(string)) } } @@ -527,15 +536,13 @@ func interpolationFuncJoin() ast.Function { } for _, arg := range args[1:] { - if parts, ok := arg.(ast.Variable); ok { - for _, part := range parts.Value.([]ast.Variable) { - list = append(list, part.Value.(string)) - } - } - if parts, ok := arg.([]ast.Variable); ok { - for _, part := range parts { - list = append(list, part.Value.(string)) + for _, part := range arg.([]ast.Variable) { + if part.Type != ast.TypeString { + return nil, fmt.Errorf( + "only works on flat lists, this list contains elements of %s", + part.Type.Printable()) } + list = append(list, part.Value.(string)) } } @@ -639,9 +646,11 @@ func interpolationFuncLength() ast.Function { return len(typedSubject), nil case []ast.Variable: return len(typedSubject), nil + case map[string]ast.Variable: + return len(typedSubject), nil } - return 0, fmt.Errorf("arguments to length() must be a string or list") + return 0, fmt.Errorf("arguments to length() must be a string, list, or map") }, } } @@ -740,9 +749,9 @@ func interpolationFuncLookup(vs map[string]ast.Variable) ast.Function { } } if v.Type != ast.TypeString { - return "", fmt.Errorf( - "lookup for '%s' has bad type %s", - args[1].(string), v.Type) + return nil, fmt.Errorf( + "lookup() may only be used with flat maps, this map contains elements of %s", + v.Type.Printable()) } return v.Value.(string), nil @@ -771,8 +780,13 @@ func interpolationFuncElement() ast.Function { resolvedIndex := 
index % len(list) - v := list[resolvedIndex].Value - return v, nil + v := list[resolvedIndex] + if v.Type != ast.TypeString { + return nil, fmt.Errorf( + "element() may only be used with flat lists, this list contains elements of %s", + v.Type.Printable()) + } + return v.Value, nil }, } } @@ -793,7 +807,7 @@ func interpolationFuncKeys(vs map[string]ast.Variable) ast.Function { sort.Strings(keys) - //Keys are guaranteed to be strings + // Keys are guaranteed to be strings return stringSliceToVariableValue(keys), nil }, } diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index dcc563ecb..11450f668 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -211,6 +211,13 @@ func TestInterpolateFuncCompact(t *testing.T) { []interface{}{}, false, }, + + // errrors on list of lists + { + `${compact(list(list("a"), list("b")))}`, + nil, + true, + }, }, }) } @@ -502,6 +509,12 @@ func TestInterpolateFuncDistinct(t *testing.T) { nil, true, }, + // non-flat list is an error + { + `${distinct(list(list("a"), list("a")))}`, + nil, + true, + }, }, }) } @@ -665,6 +678,7 @@ func TestInterpolateFuncJoin(t *testing.T) { Vars: map[string]ast.Variable{ "var.a_list": interfaceToVariableSwallowError([]string{"foo"}), "var.a_longer_list": interfaceToVariableSwallowError([]string{"foo", "bar", "baz"}), + "var.list_of_lists": interfaceToVariableSwallowError([]interface{}{[]string{"foo"}, []string{"bar"}, []string{"baz"}}), }, Cases: []testFunctionCase{ { @@ -684,6 +698,17 @@ func TestInterpolateFuncJoin(t *testing.T) { "foo.bar.baz", false, }, + + { + `${join(".", var.list_of_lists)}`, + nil, + true, + }, + { + `${join(".", list(list("nested")))}`, + nil, + true, + }, }, }) } @@ -878,6 +903,17 @@ func TestInterpolateFuncLength(t *testing.T) { "0", false, }, + // Works for maps + { + `${length(map("k", "v"))}`, + "1", + false, + }, + { + `${length(map("k1", "v1", "k2", "v2"))}`, + "2", + false, + }, }, }) } @@ -1003,15 +1039,29 @@ func TestInterpolateFuncSplit(t *testing.T) { func TestInterpolateFuncLookup(t *testing.T) { testFunction(t, testFunctionConfig{ Vars: map[string]ast.Variable{ - "var.foo": ast.Variable{ + "var.foo": { Type: ast.TypeMap, Value: map[string]ast.Variable{ - "bar": ast.Variable{ + "bar": { Type: ast.TypeString, Value: "baz", }, }, }, + "var.map_of_lists": ast.Variable{ + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "bar": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeString, + Value: "baz", + }, + }, + }, + }, + }, }, Cases: []testFunctionCase{ { @@ -1048,6 +1098,13 @@ func TestInterpolateFuncLookup(t *testing.T) { true, }, + // Cannot lookup into map of lists + { + `${lookup(var.map_of_lists, "bar")}`, + nil, + true, + }, + // Non-empty default { `${lookup(var.foo, "zap", "xyz")}`, @@ -1209,6 +1266,13 @@ func TestInterpolateFuncValues(t *testing.T) { nil, true, }, + + // Map of lists + { + `${values(map("one", list()))}`, + nil, + true, + }, }, }) } @@ -1221,9 +1285,10 @@ func interfaceToVariableSwallowError(input interface{}) ast.Variable { func TestInterpolateFuncElement(t *testing.T) { testFunction(t, testFunctionConfig{ Vars: map[string]ast.Variable{ - "var.a_list": interfaceToVariableSwallowError([]string{"foo", "baz"}), - "var.a_short_list": interfaceToVariableSwallowError([]string{"foo"}), - "var.empty_list": interfaceToVariableSwallowError([]interface{}{}), + "var.a_list": interfaceToVariableSwallowError([]string{"foo", "baz"}), + "var.a_short_list": 
interfaceToVariableSwallowError([]string{"foo"}), + "var.empty_list": interfaceToVariableSwallowError([]interface{}{}), + "var.a_nested_list": interfaceToVariableSwallowError([]interface{}{[]string{"foo"}, []string{"baz"}}), }, Cases: []testFunctionCase{ { @@ -1265,6 +1330,13 @@ func TestInterpolateFuncElement(t *testing.T) { nil, true, }, + + // Only works on single-level lists + { + `${element(var.a_nested_list, "0")}`, + nil, + true, + }, }, }) } @@ -1466,6 +1538,7 @@ func testFunction(t *testing.T, config testFunctionConfig) { } result, err := hil.Eval(ast, langEvalConfig(config.Vars)) + t.Logf("err: %s", err) if err != nil != tc.Error { t.Fatalf("Case #%d:\ninput: %#v\nerr: %s", i, tc.Input, err) } diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index ff555f2e6..71b9e150b 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -115,15 +115,15 @@ The supported built-in functions are: Example: `concat(aws_instance.db.*.tags.Name, aws_instance.web.*.tags.Name)` * `distinct(list)` - Removes duplicate items from a list. Keeps the first - occurrence of each element, and removes subsequent occurences. - Example: `distinct(var.usernames)` + occurrence of each element, and removes subsequent occurences. This + function is only valid for flat lists. Example: `distinct(var.usernames)` * `element(list, index)` - Returns a single element from a list at the given index. If the index is greater than the number of elements, this function will wrap using a standard mod algorithm. - A list is only possible with splat variables from resources with - a count greater than one. - Example: `element(aws_subnet.foo.*.id, count.index)` + This function only works on flat lists. Examples: + * `element(aws_subnet.foo.*.id, count.index)` + * `element(var.list_of_strings, 2)` * `file(path)` - Reads the contents of a file into the string. Variables in this file are _not_ interpolated. The contents of the file are @@ -149,24 +149,28 @@ The supported built-in functions are: `formatlist("instance %v has private ip %v", aws_instance.foo.*.id, aws_instance.foo.*.private_ip)`. Passing lists with different lengths to formatlist results in an error. - * `index(list, elem)` - Finds the index of a given element in a list. Example: - `index(aws_instance.foo.*.tags.Name, "foo-test")` + * `index(list, elem)` - Finds the index of a given element in a list. + This function only works on flat lists. + Example: `index(aws_instance.foo.*.tags.Name, "foo-test")` - * `join(delim, list)` - Joins the list with the delimiter for a resultant string. A list is - only possible with splat variables from resources with a count - greater than one. Example: `join(",", aws_instance.foo.*.id)` + * `join(delim, list)` - Joins the list with the delimiter for a resultant string. + This function works only on flat lists. + Examples: + * `join(",", aws_instance.foo.*.id)` + * `join(",", var.ami_list)` * `jsonencode(item)` - Returns a JSON-encoded representation of the given item, which may be a string, list of strings, or map from string to string. Note that if the item is a string, the return value includes the double quotes. - * `keys(map)` - Returns a lexically sorted, JSON-encoded list of the map keys. + * `keys(map)` - Returns a lexically sorted list of the map keys. 
- * `length(list)` - Returns a number of members in a given list + * `length(list)` - Returns a number of members in a given list, map, or string. or a number of characters in a given string. * `${length(split(",", "a,b,c"))}` = 3 * `${length("a,b,c")}` = 5 + * `${length(map("key", "val"))}` = 1 * `list(items...)` - Returns a list consisting of the arguments to the function. This function provides a way of representing list literals in interpolation. @@ -177,7 +181,9 @@ The supported built-in functions are: variable. The `map` parameter should be another variable, such as `var.amis`. If `key` does not exist in `map`, the interpolation will fail unless you specify a third argument, `default`, which should be a - string value to return if no `key` is found in `map. + string value to return if no `key` is found in `map`. This function + only works on flat maps and will return an error for maps that + include nested lists or maps. * `lower(string)` - Returns a copy of the string with all Unicode letters mapped to their lower case. @@ -232,7 +238,9 @@ The supported built-in functions are: * `uuid()` - Returns a UUID string in RFC 4122 v4 format. This string will change with every invocation of the function, so in order to prevent diffs on every plan & apply, it must be used with the [`ignore_changes`](/docs/configuration/resources.html#ignore-changes) lifecycle attribute. - * `values(map)` - Returns a JSON-encoded list of the map values, in the order of the keys returned by the `keys` function. + * `values(map)` - Returns a list of the map values, in the order of the keys + returned by the `keys` function. This function only works on flat maps and + will return an error for maps that include nested lists or maps. ## Templates From f8caa630d84477ea1982bab7570d6b27e229964b Mon Sep 17 00:00:00 2001 From: Steven Wirges Date: Thu, 28 Jul 2016 17:08:06 +0200 Subject: [PATCH 0460/1238] Updated compute_address docs Added address to attribute ref --- .../source/docs/providers/google/r/compute_address.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/providers/google/r/compute_address.html.markdown b/website/source/docs/providers/google/r/compute_address.html.markdown index 8d5655b14..df0c8fad5 100644 --- a/website/source/docs/providers/google/r/compute_address.html.markdown +++ b/website/source/docs/providers/google/r/compute_address.html.markdown @@ -42,3 +42,4 @@ In addition to the arguments listed above, the following computed attributes are exported: * `self_link` - The URI of the created resource. +* `address` - The IP of the created resource. From a1c14f0ffc315bb24e6e39b8a1b0e02529bf2c8a Mon Sep 17 00:00:00 2001 From: Clayton O'Neill Date: Thu, 28 Jul 2016 11:24:12 -0400 Subject: [PATCH 0461/1238] Fix OpenStack documentation (#7844) References to security_groups attribute on network objects should actually be security_group_ids. 
--- .../providers/openstack/r/networking_network_v2.html.markdown | 2 +- .../docs/providers/openstack/r/networking_port_v2.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index 0eff316e8..d427c3188 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -40,7 +40,7 @@ resource "openstack_networking_port_v2" "port_1" { name = "port_1" network_id = "${openstack_networking_network_v2.network_1.id}" admin_state_up = "true" - security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.id}"] + security_group_ids = ["${openstack_compute_secgroup_v2.secgroup_1.id}"] fixed_ip { "subnet_id" = "008ba151-0b8c-4a67-98b5-0d2b87666062" diff --git a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown index df8c9d996..0a4c21e4a 100644 --- a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown @@ -82,7 +82,7 @@ The following attributes are exported: * `mac_address` - See Argument Reference above. * `tenant_id` - See Argument Reference above. * `device_owner` - See Argument Reference above. -* `security_groups` - See Argument Reference above. +* `security_group_ids` - See Argument Reference above. * `device_id` - See Argument Reference above. * `fixed_ip/ip_address` - See Argument Reference above. From 16a1a0c3c9c4af6e2ca0ac54b45d92aec9ffc008 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 28 Jul 2016 10:31:21 -0500 Subject: [PATCH 0462/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7ccaf70e..d4a3ca319 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,7 @@ IMPROVEMENTS: * core: The `jsonencode` interpolation function now supports encoding lists and maps [GH-6749] * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. 
[GH-6923] * core: Support `.` in map keys [GH-7654] + * core: Enhance interpolation functions to account for first class maps and lists [GH-7832] [GH-7834] * command: Remove second DefaultDataDirectory const [GH-7666] * provider/aws: Add `dns_name` to `aws_efs_mount_target` [GH-7428] * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user [GH-7766] From c6c0443548b6a5a6b5fec4293b87a139ea6e4594 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 28 Jul 2016 10:33:49 -0500 Subject: [PATCH 0463/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4a3ca319..050f46b24 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -47,6 +47,8 @@ FEATURES: * **New Data Source:** `consul_keys` [GH-7678] * **New Interpolation Function:** `sort` [GH-7128] * **New Interpolation Function:** `distinct` [GH-7174] + * **New Interpolation Function:** `list` [GH-7528] + * **New Interpolation Function:** `map` [GH-7832] * **New Provider:** `grafana` [GH-6206] * **New Provider:** `logentries` [GH-7067] * **New Provider:** `scaleway` [GH-7331] From 14f19aff1b1586f8efa8ede9347721be625c832c Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Thu, 28 Jul 2016 10:43:24 -0500 Subject: [PATCH 0464/1238] CHANGELOG: sort BC section by provider/core --- CHANGELOG.md | 54 +++++++++++++++++++++++++++------------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 050f46b24..9bcc00ac6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,31 +2,34 @@ BACKWARDS INCOMPATIBILITIES / NOTES: - * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. - * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.) - * The `concat()` interpolation function can no longer be used to join strings. - * Quotation marks may no longer be escaped in HIL expressions [GH-7201] - * `openstack_networking_subnet_v2` now defaults to turning DHCP on. - * `aws_elb` now defaults `cross_zone_load_balancing` to `true` - * `aws_instance`: EC2 Classic users may continue to use - `security_groups` to reference Security Groups by their `name`. Users who are - managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, - and reference the security groups by their `id`. - Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065 - * Lists materialized using splat syntax, for example `aws_instance.foo.*.id` are now ordered by the count index rather than lexographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour. - * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. 
As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Checkout the documentaiton on [AWS Kinesis Firehose](http://localhost:4567/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information [GH-7375] - * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . [GH-6954] - * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. - * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`. - * `azurerm_dns_cname_record` now accepts a single record rather than a list of records - * `azurerm_virtual_machine` computer_name now Required - * `aws_db_instance` now defaults `publicly_accessible` to false - * `keep_updated` parameter removed from `docker_image` - This parameter never did what it was supposed to do. - See relevant docs, specifically `pull_trigger` & new `docker_registry_image` data source to understand how to keep your `docker_image` updated. - * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. - * `atlas_artifact` resource has be deprecated. Please use the new `atlas_artifact` Data Source - * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please ue the new `openstack_lb_member_v1` resource. - * All deprecated parameters are removed from all `CloudStack` resources + * Terraform Core + * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. + * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.) + * The `concat()` interpolation function can no longer be used to join strings. + * Quotation marks may no longer be escaped in HIL expressions [GH-7201] + * Lists materialized using splat syntax, for example `aws_instance.foo.*.id` are now ordered by the count index rather than lexographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour. + * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. + * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. 
Currently outputs cannot be named `config` or `backend`. + * AWS Provider + * `aws_elb` now defaults `cross_zone_load_balancing` to `true` + * `aws_instance`: EC2 Classic users may continue to use `security_groups` to reference Security Groups by their `name`. Users who are managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, and reference the security groups by their `id`. Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065 + * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Check out the documentation on [AWS Kinesis Firehose](https://www.terraform.io/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information [GH-7375] + * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . [GH-6954] + * `aws_db_instance` now defaults `publicly_accessible` to false + * Microsoft Azure Provider + * In documentation, the "Azure (Resource Manager)" provider has been renamed to the "Microsoft Azure" provider. + * `azurerm_dns_cname_record` now accepts a single record rather than a list of records + * `azurerm_virtual_machine` `computer_name` is now Required + * OpenStack Provider + * `openstack_networking_subnet_v2` now defaults to turning DHCP on. + * `openstack_fw_policy_v1` now correctly applies rules in the order they are specified. Upon the next apply, current rules might be re-ordered. + * The `member` attribute of `openstack_lb_pool_v1` has been deprecated. Please use the new `openstack_lb_member_v1` resource. + * Docker Provider + * `keep_updated` parameter removed from `docker_image` - This parameter never did what it was supposed to do. See relevant docs, specifically `pull_trigger` & new `docker_registry_image` data source to understand how to keep your `docker_image` updated. + * Atlas Provider + * `atlas_artifact` resource has been deprecated. Please use the new `atlas_artifact` Data Source. + * CloudStack Provider + * All deprecated parameters are removed from all `CloudStack` resources FEATURES: @@ -34,7 +37,6 @@ FEATURES: * **Lists and maps** can now be used as first class types for variables and may also be passed between modules. [GH-6322] * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. [GH-5811] * **State Import** allows a way to import existing resources into Terraform state for many types of resource. Initial coverage of AWS is quite high, and it is straightforward to add support for new resources.
- * **New Command:** `terraform state` to provide access to a variety of state manipulation functions [GH-5811] * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs [GH-7608] * **New Data Source:** `aws_ami` [GH-6911] From bbd9b2c944dd0104625bcff9a7d250e8291701b3 Mon Sep 17 00:00:00 2001 From: Stephen Muth Date: Thu, 28 Jul 2016 12:01:06 -0400 Subject: [PATCH 0465/1238] provider/powerdns: Add support for PowerDNS 4 API (#7819) * Auto-detect the API version and update the endpoint URL accordingly * Typo fix * Make client and resource work with the 4.X API * Update documentation * Fix typos * 204 now counts as a "success" response See https://github.com/PowerDNS/pdns/commit/f0e76cee2c83e462ad9350e6772f1aaf10df8e68 for the change in the pdns repository. * Add a note about a possible pitfall when defining some records --- builtin/providers/powerdns/client.go | 84 ++++++++++++++----- .../powerdns/resource_powerdns_record.go | 1 + .../providers/powerdns/index.html.markdown | 2 +- .../providers/powerdns/r/record.html.markdown | 15 ++++ 4 files changed, 82 insertions(+), 20 deletions(-) diff --git a/builtin/providers/powerdns/client.go b/builtin/providers/powerdns/client.go index ca54ac718..9b53011fe 100644 --- a/builtin/providers/powerdns/client.go +++ b/builtin/providers/powerdns/client.go @@ -7,17 +7,17 @@ import ( "io" "net/http" "net/url" + "strconv" "strings" "github.com/hashicorp/go-cleanhttp" ) type Client struct { - // Location of PowerDNS server to use - ServerUrl string - // REST API Static authentication key - ApiKey string - Http *http.Client + ServerUrl string // Location of PowerDNS server to use + ApiKey string // REST API Static authentication key + ApiVersion int // API version to use + Http *http.Client } // NewClient returns a new PowerDNS client @@ -27,15 +27,26 @@ func NewClient(serverUrl string, apiKey string) (*Client, error) { ApiKey: apiKey, Http: cleanhttp.DefaultClient(), } + var err error + client.ApiVersion, err = client.detectApiVersion() + if err != nil { + return nil, err + } return &client, nil } // Creates a new request with necessary headers func (c *Client) newRequest(method string, endpoint string, body []byte) (*http.Request, error) { - url, err := url.Parse(c.ServerUrl + endpoint) + var urlStr string + if c.ApiVersion > 0 { + urlStr = c.ServerUrl + "/api/v" + strconv.Itoa(c.ApiVersion) + endpoint + } else { + urlStr = c.ServerUrl + endpoint + } + url, err := url.Parse(urlStr) if err != nil { - return nil, fmt.Errorf("Error during parting request URL: %s", err) + return nil, fmt.Errorf("Error during parsing request URL: %s", err) } var bodyReader io.Reader @@ -59,20 +70,21 @@ func (c *Client) newRequest(method string, endpoint string, body []byte) (*http. 
} type ZoneInfo struct { - Id string `json:"id"` - Name string `json:"name"` - URL string `json:"url"` - Kind string `json:"kind"` - DnsSec bool `json:"dnsssec"` - Serial int64 `json:"serial"` - Records []Record `json:"records,omitempty"` + Id string `json:"id"` + Name string `json:"name"` + URL string `json:"url"` + Kind string `json:"kind"` + DnsSec bool `json:"dnsssec"` + Serial int64 `json:"serial"` + Records []Record `json:"records,omitempty"` + ResourceRecordSets []ResourceRecordSet `json:"rrsets,omitempty"` } type Record struct { Name string `json:"name"` Type string `json:"type"` Content string `json:"content"` - TTL int `json:"ttl"` + TTL int `json:"ttl"` // For API v0 Disabled bool `json:"disabled"` } @@ -80,6 +92,7 @@ type ResourceRecordSet struct { Name string `json:"name"` Type string `json:"type"` ChangeType string `json:"changetype"` + TTL int `json:"ttl"` // For API v1 Records []Record `json:"records,omitempty"` } @@ -111,6 +124,26 @@ func parseId(recId string) (string, string, error) { } } +// Detects the API version in use on the server +// Uses int to represent the API version: 0 is the legacy AKA version 3.4 API +// Any other integer correlates with the same API version +func (client *Client) detectApiVersion() (int, error) { + req, err := client.newRequest("GET", "/api/v1/servers", nil) + if err != nil { + return -1, err + } + resp, err := client.Http.Do(req) + if err != nil { + return -1, err + } + defer resp.Body.Close() + if resp.StatusCode == 200 { + return 1, nil + } else { + return 0, nil + } +} + // Returns all Zones of server, without records func (client *Client) ListZones() ([]ZoneInfo, error) { @@ -154,7 +187,20 @@ func (client *Client) ListRecords(zone string) ([]Record, error) { return nil, err } - return zoneInfo.Records, nil + records := zoneInfo.Records + // Convert the API v1 response to v0 record structure + for _, rrs := range zoneInfo.ResourceRecordSets { + for _, record := range rrs.Records { + records = append(records, Record{ + Name: rrs.Name, + Type: rrs.Type, + Content: record.Content, + TTL: rrs.TTL, + }) + } + } + + return records, nil } // Returns only records of specified name and type @@ -232,7 +278,7 @@ func (client *Client) CreateRecord(zone string, record Record) (string, error) { } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != 200 && resp.StatusCode != 204 { errorResp := new(errorResponse) if err = json.NewDecoder(resp.Body).Decode(errorResp); err != nil { return "", fmt.Errorf("Error creating record: %s", record.Id()) @@ -263,7 +309,7 @@ func (client *Client) ReplaceRecordSet(zone string, rrSet ResourceRecordSet) (st } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != 200 && resp.StatusCode != 204 { errorResp := new(errorResponse) if err = json.NewDecoder(resp.Body).Decode(errorResp); err != nil { return "", fmt.Errorf("Error creating record set: %s", rrSet.Id()) @@ -298,7 +344,7 @@ func (client *Client) DeleteRecordSet(zone string, name string, tpe string) erro } defer resp.Body.Close() - if resp.StatusCode != 200 { + if resp.StatusCode != 200 && resp.StatusCode != 204 { errorResp := new(errorResponse) if err = json.NewDecoder(resp.Body).Decode(errorResp); err != nil { return fmt.Errorf("Error deleting record: %s %s", name, tpe) diff --git a/builtin/providers/powerdns/resource_powerdns_record.go b/builtin/providers/powerdns/resource_powerdns_record.go index 11f88a582..b5f9e0687 100644 --- a/builtin/providers/powerdns/resource_powerdns_record.go +++ 
b/builtin/providers/powerdns/resource_powerdns_record.go @@ -57,6 +57,7 @@ func resourcePDNSRecordCreate(d *schema.ResourceData, meta interface{}) error { rrSet := ResourceRecordSet{ Name: d.Get("name").(string), Type: d.Get("type").(string), + TTL: d.Get("ttl").(int), } zone := d.Get("zone").(string) diff --git a/website/source/docs/providers/powerdns/index.html.markdown b/website/source/docs/providers/powerdns/index.html.markdown index 172a7ea20..edd82d255 100644 --- a/website/source/docs/providers/powerdns/index.html.markdown +++ b/website/source/docs/providers/powerdns/index.html.markdown @@ -9,7 +9,7 @@ description: |- # PowerDNS Provider The PowerDNS provider is used to manipulate DNS records supported by PowerDNS server. The provider needs to be configured -with the proper credentials before it can be used. +with the proper credentials before it can be used. It supports both the [legacy API](https://doc.powerdns.com/3/httpapi/api_spec/) and the new [version 1 API](https://doc.powerdns.com/md/httpapi/api_spec/); however, resources may need to be configured differently. Use the navigation to the left to read about the available resources. diff --git a/website/source/docs/providers/powerdns/r/record.html.markdown b/website/source/docs/providers/powerdns/r/record.html.markdown index 785c0cd48..8d9502604 100644 --- a/website/source/docs/providers/powerdns/r/record.html.markdown +++ b/website/source/docs/providers/powerdns/r/record.html.markdown @@ -12,6 +12,21 @@ Provides a PowerDNS record resource. ## Example Usage +Note that PowerDNS internally lowercases certain records (e.g. CNAME and AAAA), which can lead to resources being marked for a change in every single plan. + +For the v1 API (PowerDNS version 4): +``` +# Add a record to the zone +resource "powerdns_record" "foobar" { + zone = "example.com." + name = "www.example.com" + type = "A" + ttl = 300 + records = ["192.168.0.11"] +} +``` + +For the legacy API (PowerDNS version 3.4): ``` # Add a record to the zone resource "powerdns_record" "foobar" { From 4353832f6a755da887ae58b2d479f5755f4676a8 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 28 Jul 2016 17:02:07 +0100 Subject: [PATCH 0466/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bcc00ac6..868d97b47 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -180,6 +180,7 @@ IMPROVEMENTS: * provider/openstack: Enforce `ForceNew` on Instance Block Device [GH-6921] * provider/openstack: Can now stop instances before destroying them [GH-7184] * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion [GH-6997] + * provider/powerdns: Add support for PowerDNS 4 API [GH-7819] * provider/triton: add `triton_machine` `domain names` [GH-7149] * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` [GH-6785] * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip [GH-6377] From efc6bf01b0944d61ddd67d39c423b9fae899b98d Mon Sep 17 00:00:00 2001 From: Clayton O'Neill Date: Thu, 28 Jul 2016 12:18:16 -0400 Subject: [PATCH 0467/1238] Fix subnet part of OS network example This fixes the hard-coded network id and also fixes the fixed_ip address to be one on the network that is specified above.
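For illustration, a minimal sketch of the corrected pattern drawn from the diff below (it assumes the `openstack_networking_subnet_v2.subnet_1` resource defined earlier in that documentation example, and that 192.168.199.10 falls inside that subnet's CIDR): the port references the subnet by interpolation instead of a hard-coded UUID, and the fixed IP is taken from the subnet's own range.

```
resource "openstack_networking_port_v2" "port_1" {
  # ... other port attributes as in the full example ...

  fixed_ip {
    # Reference the managed subnet instead of a hard-coded ID
    "subnet_id"  = "${openstack_networking_subnet_v2.subnet_1.id}"

    # Use an address inside that subnet's range
    "ip_address" = "192.168.199.10"
  }
}
```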
--- .../providers/openstack/r/networking_network_v2.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index d427c3188..96c4cb3b3 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -43,8 +43,8 @@ resource "openstack_networking_port_v2" "port_1" { security_group_ids = ["${openstack_compute_secgroup_v2.secgroup_1.id}"] fixed_ip { - "subnet_id" = "008ba151-0b8c-4a67-98b5-0d2b87666062" - "ip_address" = "172.24.4.2" + "subnet_id" = "${openstack_networking_subnet_v2.subnet_1.id}" + "ip_address" = "192.168.199.10" } } From 0c714592f0a73b3ae0dc9e3704cbfa146fa9cf2f Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 28 Jul 2016 11:11:53 -0400 Subject: [PATCH 0468/1238] Fix variable handling on subsequent pushes The handling of remote variables was completely disabled for push. We still need to fetch variables from atlas for push, because if the variable is only set remotely the Input walk will still prompt the user for a value. We add the missing remote variables to the context to disable input. We now only handle remote variables as atlas.TFVar and explicitly pass around that type rather than an `interface{}`. Shorten the test fixture slightly to make the output a little more readable on failures. --- command/push.go | 98 +++++++++++++------- command/push_test.go | 105 ++++++++++++---------- command/test-fixtures/push-tfvars/main.tf | 3 +- 3 files changed, 121 insertions(+), 85 deletions(-) diff --git a/command/push.go b/command/push.go index 557887b7c..d7845ddb4 100644 --- a/command/push.go +++ b/command/push.go @@ -137,19 +137,28 @@ func (c *PushCommand) Run(args []string) int { c.client = &atlasPushClient{Client: client} } - // Get the variables we might already have + // Get the variables we already have in atlas atlasVars, err := c.client.Get(name) if err != nil { c.Ui.Error(fmt.Sprintf( "Error looking up previously pushed configuration: %s", err)) return 1 } - for k, v := range atlasVars { - if _, ok := overwriteMap[k]; ok { - continue - } - ctx.SetVariable(k, v) + // filter any overwrites from the atlas vars + for k := range overwriteMap { + delete(atlasVars, k) + } + + // Set remote variables in the context if we don't have a value here. These + // don't have to be correct, it just prevents the Input walk from prompting + // the user for input. The atlas variable may be an hcl-encoded object, but + we're just going to set it as the raw string value. + ctxVars := ctx.Variables() + for k, av := range atlasVars { + if _, ok := ctxVars[k]; !ok { + ctx.SetVariable(k, av.Value) + } } // Ask for input @@ -159,6 +168,18 @@ func (c *PushCommand) Run(args []string) int { return 1 } + // Now that we've gone through the input walk, we can be sure we have all + // the variables we're going to get. + // We are going to keep these separate from the atlas variables until + // upload, so we can notify the user which local variables we're sending. + serializedVars, err := tfVars(ctx.Variables()) + if err != nil { + c.Ui.Error(fmt.Sprintf( + "An error has occurred while serializing the variables for uploading:\n"+ + "%s", err)) + return 1 + } + // Build the archiving options, which includes everything it can // by default according to VCS rules but forcing the data directory.
archiveOpts := &archive.ArchiveOpts{ @@ -184,17 +205,23 @@ func (c *PushCommand) Run(args []string) int { // Output to the user the variables that will be uploaded var setVars []string - for k, _ := range ctx.Variables() { - if _, ok := overwriteMap[k]; !ok { - if _, ok := atlasVars[k]; ok { - // Atlas variable not within override, so it came from Atlas - continue - } + // variables to upload + var uploadVars []atlas.TFVar + + // Now we can combine the vars for upload to atlas and list the variables + // we're uploading for the user + for _, sv := range serializedVars { + if av, ok := atlasVars[sv.Key]; ok { + // this belongs to Atlas + uploadVars = append(uploadVars, av) + } else { + // we're uploading our local version + setVars = append(setVars, sv.Key) + uploadVars = append(uploadVars, sv) } - // This variable was set from the local value - setVars = append(setVars, k) } + sort.Strings(setVars) if len(setVars) > 0 { c.Ui.Output( @@ -210,21 +237,12 @@ func (c *PushCommand) Run(args []string) int { c.Ui.Output("") } - variables := ctx.Variables() - serializedVars, err := tfVars(variables) - if err != nil { - c.Ui.Error(fmt.Sprintf( - "An error has occurred while serializing the variables for uploading:\n"+ - "%s", err)) - return 1 - } - // Upsert! opts := &pushUpsertOptions{ Name: name, Archive: archiveR, Variables: ctx.Variables(), - TFVars: serializedVars, + TFVars: uploadVars, } c.Ui.Output("Uploading Terraform configuration...") @@ -340,10 +358,11 @@ func (c *PushCommand) Synopsis() string { return "Upload this Terraform module to Atlas to run" } -// pushClient is implementd internally to control where pushes go. This is -// either to Atlas or a mock for testing. +// pushClient is implemented internally to control where pushes go. This is +// either to Atlas or a mock for testing. We still return a map to make it +// easier to check for variable existence when filtering the overrides. 
type pushClient interface { - Get(string) (map[string]interface{}, error) + Get(string) (map[string]atlas.TFVar, error) Upsert(*pushUpsertOptions) (int, error) } @@ -358,7 +377,7 @@ type atlasPushClient struct { Client *atlas.Client } -func (c *atlasPushClient) Get(name string) (map[string]interface{}, error) { +func (c *atlasPushClient) Get(name string) (map[string]atlas.TFVar, error) { user, name, err := atlas.ParseSlug(name) if err != nil { return nil, err @@ -369,10 +388,21 @@ func (c *atlasPushClient) Get(name string) (map[string]interface{}, error) { return nil, err } - var variables map[string]interface{} - if version != nil { - // TODO: merge variables and TFVars - //variables = version.Variables + variables := make(map[string]atlas.TFVar) + + if version == nil { + return variables, nil + } + + // Variables is superseded by TFVars + if version.TFVars == nil { + for k, v := range version.Variables { + variables[k] = atlas.TFVar{Key: k, Value: v} + } + } else { + for _, v := range version.TFVars { + variables[v.Key] = v + } } return variables, nil @@ -402,7 +432,7 @@ type mockPushClient struct { GetCalled bool GetName string - GetResult map[string]interface{} + GetResult map[string]atlas.TFVar GetError error UpsertCalled bool @@ -411,7 +441,7 @@ type mockPushClient struct { UpsertError error } -func (c *mockPushClient) Get(name string) (map[string]interface{}, error) { +func (c *mockPushClient) Get(name string) (map[string]atlas.TFVar, error) { c.GetCalled = true c.GetName = name return c.GetResult, c.GetError diff --git a/command/push_test.go b/command/push_test.go index 3db6f7739..60270169e 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -119,11 +119,13 @@ func TestPush_input(t *testing.T) { variables := map[string]interface{}{ "foo": "foo", } + if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { t.Fatalf("bad: %#v", client.UpsertOptions.Variables) } } +// We want a variable from atlas to fill a missing variable locally func TestPush_inputPartial(t *testing.T) { tmp, cwd := testCwd(t) defer testFixCwd(t, tmp, cwd) @@ -143,8 +145,10 @@ func TestPush_inputPartial(t *testing.T) { defer os.Remove(archivePath) client := &mockPushClient{ - File: archivePath, - GetResult: map[string]interface{}{"foo": "bar"}, + File: archivePath, + GetResult: map[string]atlas.TFVar{ + "foo": atlas.TFVar{Key: "foo", Value: "bar"}, + }, } ui := new(cli.MockUi) c := &PushCommand{ @@ -171,12 +175,13 @@ func TestPush_inputPartial(t *testing.T) { t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) } - variables := map[string]interface{}{ - "foo": "bar", - "bar": "foo", + expectedTFVars := []atlas.TFVar{ + {Key: "bar", Value: "foo"}, + {Key: "foo", Value: "bar"}, } - if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { - t.Fatalf("bad: %#v", client.UpsertOptions) + if !reflect.DeepEqual(client.UpsertOptions.TFVars, expectedTFVars) { + t.Logf("expected: %#v", expectedTFVars) + t.Fatalf("got: %#v", client.UpsertOptions.TFVars) } } @@ -209,8 +214,11 @@ func TestPush_localOverride(t *testing.T) { client := &mockPushClient{File: archivePath} // Provided vars should override existing ones - client.GetResult = map[string]interface{}{ - "foo": "old", + client.GetResult = map[string]atlas.TFVar{ + "foo": atlas.TFVar{ + Key: "foo", + Value: "old", + }, } ui := new(cli.MockUi) c := &PushCommand{ @@ -248,10 +256,11 @@ func TestPush_localOverride(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := pushTFVars() + expectedTFVars := pushTFVars() - if 
!reflect.DeepEqual(client.UpsertOptions.Variables, variables) { - t.Fatalf("bad: %#v", client.UpsertOptions) + if !reflect.DeepEqual(client.UpsertOptions.TFVars, expectedTFVars) { + t.Logf("expected: %#v", expectedTFVars) + t.Fatalf("got: %#v", client.UpsertOptions.TFVars) } } @@ -284,8 +293,11 @@ func TestPush_preferAtlas(t *testing.T) { client := &mockPushClient{File: archivePath} // Provided vars should override existing ones - client.GetResult = map[string]interface{}{ - "foo": "old", + client.GetResult = map[string]atlas.TFVar{ + "foo": atlas.TFVar{ + Key: "foo", + Value: "old", + }, } ui := new(cli.MockUi) c := &PushCommand{ @@ -322,11 +334,17 @@ func TestPush_preferAtlas(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := pushTFVars() - variables["foo"] = "old" + // change the expected response to match our change + expectedTFVars := pushTFVars() + for i, v := range expectedTFVars { + if v.Key == "foo" { + expectedTFVars[i] = atlas.TFVar{Key: "foo", Value: "old"} + } + } - if !reflect.DeepEqual(client.UpsertOptions.Variables, variables) { - t.Fatalf("bad: %#v", client.UpsertOptions.Variables) + if !reflect.DeepEqual(expectedTFVars, client.UpsertOptions.TFVars) { + t.Logf("expected: %#v", expectedTFVars) + t.Fatalf("got: %#v", client.UpsertOptions.TFVars) } } @@ -392,27 +410,8 @@ func TestPush_tfvars(t *testing.T) { t.Fatalf("bad: %#v", client.UpsertOptions) } - variables := pushTFVars() - - // make sure these dind't go missing for some reason - for k, v := range variables { - if !reflect.DeepEqual(client.UpsertOptions.Variables[k], v) { - t.Fatalf("bad: %#v", client.UpsertOptions.Variables[k]) - } - } - //now check TFVars - tfvars := []atlas.TFVar{ - {"bar", "foo", false}, - {"baz", `{ - A = "a" - B = "b" - interp = "${file("t.txt")}" -} -`, true}, - {"fob", `["a", "b", "c", "quotes \"in\" quotes"]` + "\n", true}, - {"foo", "bar", false}, - } + tfvars := pushTFVars() for i, expected := range tfvars { got := client.UpsertOptions.TFVars[i] @@ -584,16 +583,24 @@ func testArchiveStr(t *testing.T, path string) []string { return result } -// the structure returned from the push-tfvars test fixture -func pushTFVars() map[string]interface{} { - return map[string]interface{}{ - "foo": "bar", - "bar": "foo", - "baz": map[string]interface{}{ - "A": "a", - "B": "b", - "interp": `${file("t.txt")}`, - }, - "fob": []interface{}{"a", "b", "c", `quotes "in" quotes`}, +func pushTFVars() []atlas.TFVar { + return []atlas.TFVar{ + {"bar", "foo", false}, + {"baz", `{ + A = "a" + interp = "${file("t.txt")}" +} +`, true}, + {"fob", `["a", "quotes \"in\" quotes"]` + "\n", true}, + {"foo", "bar", false}, } } + +// the structure returned from the push-tfvars test fixture +func pushTFVarsMap() map[string]atlas.TFVar { + vars := make(map[string]atlas.TFVar) + for _, v := range pushTFVars() { + vars[v.Key] = v + } + return vars +} diff --git a/command/test-fixtures/push-tfvars/main.tf b/command/test-fixtures/push-tfvars/main.tf index 528b6ed60..2110bea73 100644 --- a/command/test-fixtures/push-tfvars/main.tf +++ b/command/test-fixtures/push-tfvars/main.tf @@ -7,14 +7,13 @@ variable "baz" { default = { "A" = "a" - "B" = "b" interp = "${file("t.txt")}" } } variable "fob" { type = "list" - default = ["a", "b", "c", "quotes \"in\" quotes"] + default = ["a", "quotes \"in\" quotes"] } resource "test_instance" "foo" {} From 0024358b08e8a1898d17928be46512b546f133cc Mon Sep 17 00:00:00 2001 From: Clint Date: Thu, 28 Jul 2016 14:30:50 -0500 Subject: [PATCH 0469/1238] Update 
resource_aws_db_instance_test.go --- builtin/providers/aws/resource_aws_db_instance_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go index e02d8bb1d..509f875e3 100644 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ b/builtin/providers/aws/resource_aws_db_instance_test.go @@ -179,7 +179,7 @@ func TestAccAWSDBInstance_enhancedMonitoring(t *testing.T) { // Regression test for https://github.com/hashicorp/terraform/issues/3760 . // We apply a plan, then change just the iops. If the apply succeeds, we // consider this a pass, as before in 3760 the request would fail -func TestAccAWS_seperate_DBInstance_iops_update(t *testing.T) { +func TestAccAWS_separate_DBInstance_iops_update(t *testing.T) { var v rds.DBInstance rName := acctest.RandString(5) From 81fa4de2d00fe955b689bb06a28018fab22faff2 Mon Sep 17 00:00:00 2001 From: Greg Aker Date: Thu, 28 Jul 2016 15:35:45 -0500 Subject: [PATCH 0470/1238] Update vendorized google deps. --- .../api/compute/v1/compute-api.json | 1187 ++- .../api/compute/v1/compute-gen.go | 6486 +++++++++++------ .../api/gensupport/buffer.go | 32 +- .../google.golang.org/api/gensupport/media.go | 15 +- .../api/gensupport/resumable.go | 9 +- .../google.golang.org/api/gensupport/send.go | 35 + .../api/storage/v1/storage-api.json | 50 +- .../api/storage/v1/storage-gen.go | 646 +- vendor/vendor.json | 12 +- 9 files changed, 5947 insertions(+), 2525 deletions(-) create mode 100644 vendor/google.golang.org/api/gensupport/send.go diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 84e1500aa..7b8cc894c 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,13 +1,13 @@ { "kind": "discovery#restDescription", - "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/0HKk2qVFNFj4BfRYktkIsjDiv2o\"", + "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/eASe8C9_MmiRuqi8LLPi5_VjUnQ\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20160302", + "revision": "20160617", "title": "Compute Engine API", - "description": "API for the Google Compute Engine service.", + "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", "ownerName": "Google", "icons": { @@ -345,6 +345,10 @@ "type": "string", "description": "Specifies a unique device name of your choice that is reflected into the /dev/disk/by-id/google-* tree of a Linux operating system running within the instance. This name can be used to reference the device for mounting, resizing, and so on, from within the instance.\n\nIf not specified, the server chooses a default device name to apply to this disk, in the form persistent-disks-x, where x is a number assigned by Google Compute Engine. This field is only applicable for persistent disks." }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts or decrypts a disk using a customer-supplied encryption key.\n\nIf you are creating a new disk, this field encrypts the new disk using an encryption key that you provide. 
If you are attaching an existing disk that is already encrypted, this field decrypts the disk using the customer-supplied encryption key.\n\nIf you encrypt a disk using a customer-supplied key, you must provide the same key again when you attempt to use this resource at a later time. For example, you must provide the key when you create a snapshot or an image from the disk or when you attach the disk to a virtual machine instance.\n\nIf you do not provide an encryption key, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot use your own keys to encrypt disks in a managed instance group." + }, "index": { "type": "integer", "description": "Assigns a zero-based index to this disk, where 0 is reserved for the boot disk. For example, if you have many disks attached to an instance, each disk would have a unique index number. If not specified, the server will choose an appropriate value.", @@ -433,7 +437,11 @@ }, "sourceImage": { "type": "string", - "description": "A source image used to create the disk. You can provide a private (custom) image, and Compute Engine will use the corresponding image from your project. For example:\n\nglobal/images/my-private-image \n\nOr you can provide an image from a publicly-available project. For example, to use a Debian image from the debian-cloud project, make sure to include the project in the URL:\n\nprojects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD \n\nwhere vYYYYMMDD is the image version. The fully-qualified URL will also work in both cases." + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key.\n\nInstance templates do not store customer-supplied encryption keys, so you cannot create disks for instances in a managed instance group if the source images are encrypted with your own keys." } } }, @@ -786,6 +794,10 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "enableCDN": { + "type": "boolean", + "description": "If true, enable Cloud CDN for this BackendService." + }, "fingerprint": { "type": "string", "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. 
An up-to-date fingerprint must be provided in order to update the BackendService.", @@ -834,6 +846,10 @@ "" ] }, + "region": { + "type": "string", + "description": "[Output Only] URL of the region where the regional backend service resides. This field is not applicable to global backend services." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -869,7 +885,7 @@ "properties": { "id": { "type": "string", - "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + "description": "[Output Only] Unique identifier for the resource; defined by the server." }, "items": { "type": "array", @@ -885,7 +901,7 @@ }, "nextPageToken": { "type": "string", - "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + "description": "[Output Only] A token used to continue a truncated list request." }, "selfLink": { "type": "string", @@ -893,6 +909,44 @@ } } }, + "CacheInvalidationRule": { + "id": "CacheInvalidationRule", + "type": "object", + "properties": { + "path": { + "type": "string" + } + } + }, + "CustomerEncryptionKey": { + "id": "CustomerEncryptionKey", + "type": "object", + "description": "Represents a customer-supplied encryption key", + "properties": { + "rawKey": { + "type": "string", + "description": "Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648 base64 to either encrypt or decrypt this resource." + }, + "sha256": { + "type": "string", + "description": "[Output only] The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption key that protects this resource." + } + } + }, + "CustomerEncryptionKeyProtectedDisk": { + "id": "CustomerEncryptionKeyProtectedDisk", + "type": "object", + "properties": { + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Decrypts data associated with the disk with a customer-supplied encryption key." + }, + "source": { + "type": "string", + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + } + } + }, "DeprecationStatus": { "id": "DeprecationStatus", "type": "object", @@ -943,6 +997,10 @@ "type": "string", "description": "An optional description of this resource. Provide this property when you create the resource." }, + "diskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the disk using a customer-supplied encryption key.\n\nAfter you encrypt a disk with a customer-supplied key, you must provide the same key if you use the disk later (e.g. to create a disk snapshot or an image, or to attach the disk to a virtual machine).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the disk, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the disk later." + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", @@ -993,7 +1051,11 @@ }, "sourceImage": { "type": "string", - "description": "The source image used to create this disk. 
If the source image is deleted from the system, this field will not be set, even if an image with the same name has been re-created.\n\nWhen creating a disk, you can provide a private (custom) image using the following input, and Compute Engine will use the corresponding image from your project. For example:\n\nglobal/images/my-private-image \n\nOr you can provide an image from a publicly-available project. For example, to use a Debian image from the debian-cloud project, make sure to include the project in the URL:\n\nprojects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD \n\nwhere vYYYYMMDD is the image version. The fully-qualified URL will also work in both cases.\n\nYou can also specify the latest image for a private image family by replacing the image name suffix with family/family-name. For example:\n\nglobal/images/family/my-private-family \n\nOr you can specify an image family from a publicly-available project. For example, to use the latest Debian 7 from the debian-cloud project, make sure to include the project in the URL:\n\nprojects/debian-cloud/global/images/family/debian-7" + "description": "The source image used to create this disk. If the source image is deleted, this field will not be set.\n\nTo create a disk with one of the public operating system images, specify the image by its family name. For example, specify family/debian-8 to use the latest Debian 8 image:\n\nprojects/debian-cloud/global/images/family/debian-8 \n\nAlternatively, use a specific version of a public operating system image:\n\nprojects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD \n\nTo create a disk with a private image that you created, specify the image name in the following format:\n\nglobal/images/my-private-image \n\nYou can also specify a private image by its image family, which returns the latest version of the image in that family. Replace the image name with family/family-name:\n\nglobal/images/family/my-private-family" + }, + "sourceImageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source image. Required if the source image is protected by a customer-supplied encryption key." }, "sourceImageId": { "type": "string", @@ -1003,6 +1065,10 @@ "type": "string", "description": "The source snapshot used to create this disk. You can provide this as a partial or full URL to the resource. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/global/snapshots/snapshot \n- projects/project/global/snapshots/snapshot \n- global/snapshots/snapshot" }, + "sourceSnapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source snapshot. Required if the source snapshot is protected by a customer-supplied encryption key." + }, "sourceSnapshotId": { "type": "string", "description": "[Output Only] The unique ID of the snapshot used to create this disk. This value identifies the exact snapshot that was used to create this persistent disk. For example, if you created the persistent disk from a snapshot that was later deleted and recreated under the same name, the source snapshot ID would identify the exact version of the snapshot that was used." 
@@ -1307,6 +1373,17 @@ } } }, + "DisksResizeRequest": { + "id": "DisksResizeRequest", + "type": "object", + "properties": { + "sizeGb": { + "type": "string", + "description": "The new size of the persistent disk, which is specified in GB.", + "format": "int64" + } + } + }, "DisksScopedList": { "id": "DisksScopedList", "type": "object", @@ -1456,7 +1533,7 @@ }, "sourceTags": { "type": "array", - "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.\n\nSource tags cannot be used to allow access to an instance's external IP address. Because tags are associated with an instance, not an IP address, source tags can only be used to control traffic traveling from an instance inside the same network as the firewall.", "items": { "type": "string" } @@ -1512,7 +1589,7 @@ }, "IPProtocol": { "type": "string", - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH or SCTP.", + "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.", "enum": [ "AH", "ESP", @@ -1995,11 +2072,19 @@ "description": "Size of the image when restored onto a persistent disk (in GB).", "format": "int64" }, + "family": { + "type": "string", + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated." + }, "id": { "type": "string", "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", "format": "uint64" }, + "imageEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the image using a customer-supplied encryption key.\n\nAfter you encrypt an image with a customer-supplied key, you must provide the same key if you use the image later (e.g. to create a disk from the image).\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the image, then the disk will be encrypted using an automatically generated key and you do not need to provide a key to use the image later." + }, "kind": { "type": "string", "description": "[Output Only] Type of the resource. Always compute#image for images.", @@ -2007,7 +2092,7 @@ }, "licenses": { "type": "array", - "description": "Any applicable publicly visible licenses.", + "description": "Any applicable license URI.", "items": { "type": "string" } @@ -2060,6 +2145,10 @@ "type": "string", "description": "URL of the The source disk used to create this image. This can be a full or valid partial URL. You must provide either this property or the rawDisk.source property but not both to create an image. 
For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/disk/disk \n- projects/project/zones/zone/disk/disk \n- zones/zone/disks/disk" }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, "sourceDiskId": { "type": "string", "description": "The ID value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name." @@ -2194,14 +2283,14 @@ }, "serviceAccounts": { "type": "array", - "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Authenticating from Google Compute Engine for more information.", + "description": "A list of service accounts, with their specified scopes, authorized for this instance. Service accounts generate access tokens that can be accessed through the metadata server and used to authenticate applications on the instance. See Service Accounts for more information.", "items": { "$ref": "ServiceAccount" } }, "status": { "type": "string", - "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, and TERMINATED.", + "description": "[Output Only] The status of the instance. One of the following values: PROVISIONING, STAGING, RUNNING, STOPPING, SUSPENDED, SUSPENDING, and TERMINATED.", "enum": [ "PROVISIONING", "RUNNING", @@ -2400,6 +2489,7 @@ "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", + "description": "An Instance Template Manager resource.", "properties": { "baseInstanceName": { "type": "string", @@ -2425,7 +2515,7 @@ }, "fingerprint": { "type": "string", - "description": "[Output Only] The fingerprint of the target pools information. You can use this optional field for optimistic locking when you update the target pool entries.", + "description": "[Output Only] The fingerprint of the resource data. You can use this optional field for optimistic locking when you update the resource.", "format": "byte" }, "id": { @@ -2439,12 +2529,7 @@ }, "instanceTemplate": { "type": "string", - "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group.", - "annotations": { - "required": [ - "compute.instanceGroupManagers.insert" - ] - } + "description": "The URL of the instance template that is specified for this managed instance group. The group uses this template to create all new instances in the managed instance group." }, "kind": { "type": "string", @@ -2978,7 +3063,7 @@ "properties": { "canIpForward": { "type": "boolean", - "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the canIpForward documentation for more information." 
+ "description": "Enables instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, specify true. If unsure, leave this set to false. See the Enable IP forwarding for instances documentation for more information." }, "description": { "type": "string", @@ -3241,6 +3326,19 @@ } } }, + "InstancesStartWithEncryptionKeyRequest": { + "id": "InstancesStartWithEncryptionKeyRequest", + "type": "object", + "properties": { + "disks": { + "type": "array", + "description": "Array of disks associated with this instance that are protected with a customer-supplied encryption key.\n\nIn order to start the instance, the disk url and its corresponding key must be provided.\n\nIf the disk is not protected with a customer-supplied encryption key it should not be specified.", + "items": { + "$ref": "CustomerEncryptionKeyProtectedDisk" + } + } + } + }, "License": { "id": "License", "type": "object", @@ -3303,6 +3401,10 @@ "description": "[Deprecated] This property is deprecated and will never be populated with any relevant values.", "format": "int32" }, + "isSharedCpu": { + "type": "boolean", + "description": "[Output Only] Whether this machine type has a shared CPU. See Shared-core machine types for more information." + }, "kind": { "type": "string", "description": "[Output Only] The type of the resource. Always compute#machineType for machine types.", @@ -3740,11 +3842,11 @@ }, "networkIP": { "type": "string", - "description": "[Output Only] An optional IPV4 internal network address assigned to the instance for this network interface." + "description": "An IPv4 internal network address to assign to the instance for this network interface. If not specified by the user, an unused internal IP is assigned by the system." }, "subnetwork": { "type": "string", - "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/subnetworks/subnetwork \n- zones/zone/subnetworks/subnetwork" + "description": "The URL of the Subnetwork resource for this instance. If the network resource is in legacy mode, do not provide this property. If the network is in auto subnet mode, providing the subnetwork is optional. If the network is in custom subnet mode, then this field should be specified. If you specify this property, you can specify the subnetwork as a full or partial URL. For example, the following are all valid URLs: \n- https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork \n- regions/region/subnetworks/subnetwork" } } }, @@ -3847,7 +3949,7 @@ }, "kind": { "type": "string", - "description": "[Output Only] Type of the resource. Always compute#operation for operation resources.", + "description": "[Output Only] Type of the resource. Always compute#operation for Operation resources.", "default": "compute#operation" }, "name": { @@ -4177,6 +4279,10 @@ "type": "string", "description": "[Output Only] Creation timestamp in RFC3339 text format." 
}, + "defaultServiceAccount": { + "type": "string", + "description": "[Output Only] Default service account used by VMs running in this project." + }, "description": { "type": "string", "description": "An optional textual description of the resource." @@ -4248,6 +4354,7 @@ "IN_USE_ADDRESSES", "LOCAL_SSD_TOTAL_GB", "NETWORKS", + "ROUTERS", "ROUTES", "SNAPSHOTS", "SSD_TOTAL_GB", @@ -4290,6 +4397,7 @@ "", "", "", + "", "" ] }, @@ -4602,6 +4710,376 @@ } } }, + "Router": { + "id": "Router", + "type": "object", + "description": "Router resource.", + "properties": { + "bgp": { + "$ref": "RouterBgp" + }, + "bgpPeers": { + "type": "array", + "items": { + "$ref": "RouterBgpPeer" + } + }, + "creationTimestamp": { + "type": "string", + "description": "[Output Only] Creation timestamp in RFC3339 text format." + }, + "description": { + "type": "string", + "description": "An optional description of this resource. Provide this property when you create the resource." + }, + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server.", + "format": "uint64" + }, + "interfaces": { + "type": "array", + "items": { + "$ref": "RouterInterface" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#router" + }, + "name": { + "type": "string", + "description": "Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means the first character must be a lowercase letter, and all following characters must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "annotations": { + "required": [ + "compute.routers.insert" + ] + } + }, + "network": { + "type": "string", + "description": "URI of the network to which this router belongs.", + "annotations": { + "required": [ + "compute.routers.insert" + ] + } + }, + "region": { + "type": "string", + "description": "[Output Only] URI of the region where the router resides." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "RouterAggregatedList": { + "id": "RouterAggregatedList", + "type": "object", + "description": "Contains a list of routers.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "object", + "description": "A map of scoped router lists.", + "additionalProperties": { + "$ref": "RoutersScopedList", + "description": "Name of the scope containing this set of routers." + } + }, + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#routerAggregatedList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for this resource." 
+ } + } + }, + "RouterBgp": { + "id": "RouterBgp", + "type": "object", + "properties": { + "asn": { + "type": "integer", + "description": "Local BGP Autonomous System Number (ASN). Must be an RFC6996 private ASN, either 16-bit or 32-bit. The value will be fixed for this router resource. All VPN tunnels that link to this router will have the same local ASN.", + "format": "uint32" + } + } + }, + "RouterBgpPeer": { + "id": "RouterBgpPeer", + "type": "object", + "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Reference: https://tools.ietf.org/html/rfc4273", + "properties": { + "advertisedRoutePriority": { + "type": "integer", + "description": "The priority of routes advertised to this BGP peer. In the case where there is more than one matching route of maximum length, the routes with lowest priority value win.", + "format": "uint32" + }, + "interfaceName": { + "type": "string", + "description": "Name of the interface the BGP peer is associated with." + }, + "ipAddress": { + "type": "string", + "description": "IP address of the interface inside Google Cloud Platform." + }, + "name": { + "type": "string", + "description": "Name of this BGP peer. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + }, + "peerAsn": { + "type": "integer", + "description": "Peer BGP Autonomous System Number (ASN). For VPN use case, this value can be different for every tunnel.", + "format": "uint32" + }, + "peerIpAddress": { + "type": "string", + "description": "IP address of the BGP interface outside Google cloud." + } + } + }, + "RouterInterface": { + "id": "RouterInterface", + "type": "object", + "description": "Router interfaces. Each interface requires either one linked resource (e.g. linked_vpn_tunnel) or IP address + range (specified in ip_range).", + "properties": { + "ipRange": { + "type": "string", + "description": "IP address and range of the interface. The IP range must be in the RFC3927 link-local IP space. The value must be a CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not truncate the address as it represents the IP address of the interface." + }, + "linkedVpnTunnel": { + "type": "string", + "description": "URI of linked VPN tunnel. It must be in the same region as the router. Each interface can have at most one linked resource." + }, + "name": { + "type": "string", + "description": "Name of this interface entry. The name must be 1-63 characters long and comply with RFC1035.", + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?" + } + } + }, + "RouterList": { + "id": "RouterList", + "type": "object", + "description": "Contains a list of Router resources.", + "properties": { + "id": { + "type": "string", + "description": "[Output Only] The unique identifier for the resource. This identifier is defined by the server." + }, + "items": { + "type": "array", + "description": "A list of Router resources.", + "items": { + "$ref": "Router" + } + }, + "kind": { + "type": "string", + "description": "[Output Only] Type of resource. Always compute#router for routers.", + "default": "compute#routerList" + }, + "nextPageToken": { + "type": "string", + "description": "[Output Only] This token allows you to get the next page of results for list requests. If the number of results is larger than maxResults, use the nextPageToken as a value for the query parameter pageToken in the next list request. 
Subsequent list requests will have their own nextPageToken to continue paging through the results." + }, + "selfLink": { + "type": "string", + "description": "[Output Only] Server-defined URL for the resource." + } + } + }, + "RouterStatus": { + "id": "RouterStatus", + "type": "object", + "properties": { + "bestRoutes": { + "type": "array", + "description": "Best routes for this router's network.", + "items": { + "$ref": "Route" + } + }, + "bgpPeerStatus": { + "type": "array", + "items": { + "$ref": "RouterStatusBgpPeerStatus" + } + }, + "network": { + "type": "string", + "description": "URI of the network to which this router belongs." + } + } + }, + "RouterStatusBgpPeerStatus": { + "id": "RouterStatusBgpPeerStatus", + "type": "object", + "properties": { + "advertisedRoutes": { + "type": "array", + "description": "Routes that were advertised to the remote BGP peer", + "items": { + "$ref": "Route" + } + }, + "ipAddress": { + "type": "string", + "description": "IP address of the local BGP interface." + }, + "linkedVpnTunnel": { + "type": "string", + "description": "URL of the VPN tunnel that this BGP peer controls." + }, + "name": { + "type": "string", + "description": "Name of this BGP peer. Unique within the Routers resource." + }, + "numLearnedRoutes": { + "type": "integer", + "description": "Number of routes learned from the remote BGP Peer.", + "format": "uint32" + }, + "peerIpAddress": { + "type": "string", + "description": "IP address of the remote BGP interface." + }, + "state": { + "type": "string", + "description": "BGP state as specified in RFC1771." + }, + "status": { + "type": "string", + "description": "Status of the BGP peer: {UP, DOWN}", + "enum": [ + "DOWN", + "UNKNOWN", + "UP" + ], + "enumDescriptions": [ + "", + "", + "" + ] + }, + "uptime": { + "type": "string", + "description": "Time this session has been up. Format: 14 years, 51 weeks, 6 days, 23 hours, 59 minutes, 59 seconds" + }, + "uptimeSeconds": { + "type": "string", + "description": "Time this session has been up, in seconds. Format: 145" + } + } + }, + "RouterStatusResponse": { + "id": "RouterStatusResponse", + "type": "object", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource.", + "default": "compute#routerStatusResponse" + }, + "result": { + "$ref": "RouterStatus" + } + } + }, + "RoutersScopedList": { + "id": "RoutersScopedList", + "type": "object", + "properties": { + "routers": { + "type": "array", + "description": "List of routers contained in this scope.", + "items": { + "$ref": "Router" + } + }, + "warning": { + "type": "object", + "description": "Informational warning which replaces the list of routers when the list is empty.", + "properties": { + "code": { + "type": "string", + "description": "[Output Only] A warning code, if applicable. 
For example, Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in the response.", + "enum": [ + "CLEANUP_FAILED", + "DEPRECATED_RESOURCE_USED", + "DISK_SIZE_LARGER_THAN_IMAGE_SIZE", + "INJECTED_KERNELS_DEPRECATED", + "NEXT_HOP_ADDRESS_NOT_ASSIGNED", + "NEXT_HOP_CANNOT_IP_FORWARD", + "NEXT_HOP_INSTANCE_NOT_FOUND", + "NEXT_HOP_INSTANCE_NOT_ON_NETWORK", + "NEXT_HOP_NOT_RUNNING", + "NOT_CRITICAL_ERROR", + "NO_RESULTS_ON_PAGE", + "REQUIRED_TOS_AGREEMENT", + "RESOURCE_NOT_DELETED", + "SINGLE_INSTANCE_PROPERTY_TEMPLATE", + "UNREACHABLE" + ], + "enumDescriptions": [ + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "", + "" + ] + }, + "data": { + "type": "array", + "description": "[Output Only] Metadata about this warning in key: value format. For example:\n\"data\": [ { \"key\": \"scope\", \"value\": \"zones/us-east1-d\" }", + "items": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "[Output Only] A key that provides more detail on the warning being returned. For example, for warnings where there are no results in a list request for a particular zone, this key might be scope and the key value might be the zone name. Other examples might be a key indicating a deprecated resource and a suggested replacement, or a warning about invalid network settings (for example, if an instance attempts to perform IP forwarding but is not enabled for IP forwarding)." + }, + "value": { + "type": "string", + "description": "[Output Only] A warning data value corresponding to the key." + } + } + } + }, + "message": { + "type": "string", + "description": "[Output Only] A human-readable description of the warning code." + } + } + } + } + }, "Scheduling": { "id": "Scheduling", "type": "object", @@ -4711,10 +5189,18 @@ "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, + "snapshotEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "Encrypts the snapshot using a customer-supplied encryption key.\n\nAfter you encrypt a snapshot using a customer-supplied key, you must provide the same key if you use the image later For example, you must provide the encryption key when you create a disk from the encrypted snapshot in a future request.\n\nCustomer-supplied encryption keys do not protect access to metadata of the disk.\n\nIf you do not provide an encryption key when creating the snapshot, then the snapshot will be encrypted using an automatically generated key and you do not need to provide a key to use the snapshot later." + }, "sourceDisk": { "type": "string", "description": "[Output Only] The source disk used to create this snapshot." }, + "sourceDiskEncryptionKey": { + "$ref": "CustomerEncryptionKey", + "description": "The customer-supplied encryption key of the source disk. Required if the source disk is protected by a customer-supplied encryption key." + }, "sourceDiskId": { "type": "string", "description": "[Output Only] The ID value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name." @@ -4902,7 +5388,7 @@ }, "region": { "type": "string", - "description": "[Output Only] URL of the region where the Subnetwork resides." + "description": "URL of the region where the Subnetwork resides." }, "selfLink": { "type": "string", @@ -6009,7 +6495,7 @@ }, "tests": { "type": "array", - "description": "The list of expected URL mappings. 
Request to update this UrlMap will succeed only all of the test cases pass.", + "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.", "items": { "$ref": "UrlMapTest" } @@ -6196,6 +6682,17 @@ "type": "string", "description": "[Output Only] URL of the region where the VPN tunnel resides." }, + "remoteTrafficSelector": { + "type": "array", + "description": "Remote traffic selectors to use when establishing the VPN tunnel with peer VPN gateway. The value should be a CIDR formatted string, for example: 192.168.0.0/16. The ranges should be disjoint.", + "items": { + "type": "string" + } + }, + "router": { + "type": "string", + "description": "URL of router resource to be used for dynamic routing." + }, "selfLink": { "type": "string", "description": "[Output Only] Server-defined URL for the resource." @@ -6418,31 +6915,6 @@ "description": "[Output Only] Type of the resource. Always compute#zone for zones.", "default": "compute#zone" }, - "maintenanceWindows": { - "type": "array", - "description": "[Output Only] Any scheduled maintenance windows for this zone. When the zone is in a maintenance window, all resources which reside in the zone will be unavailable. For more information, see Maintenance Windows", - "items": { - "type": "object", - "properties": { - "beginTime": { - "type": "string", - "description": "[Output Only] Starting time of the maintenance window, in RFC3339 format." - }, - "description": { - "type": "string", - "description": "[Output Only] Textual description of the maintenance window." - }, - "endTime": { - "type": "string", - "description": "[Output Only] Ending time of the maintenance window, in RFC3339 format." - }, - "name": { - "type": "string", - "description": "[Output Only] Name of the maintenance window." - } - } - } - }, "name": { "type": "string", "description": "[Output Only] Name of the resource." @@ -6512,7 +6984,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6676,7 +7148,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6733,7 +7205,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -6897,7 +7369,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7171,7 +7643,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7292,7 +7764,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7379,7 +7851,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7436,7 +7908,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7604,7 +8076,7 @@ "id": "compute.disks.insert", "path": "{project}/zones/{zone}/disks", "httpMethod": "POST", - "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 200 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", "parameters": { "project": { "type": "string", @@ -7649,7 +8121,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7693,6 +8165,50 @@ "https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute.readonly" ] + }, + "resize": { + "id": "compute.disks.resize", + "path": "{project}/zones/{zone}/disks/{disk}/resize", + "httpMethod": "POST", + "description": "Resizes the specified persistent disk.", + "parameters": { + "disk": { + "type": "string", + "description": "The name of the persistent disk.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "disk" + ], + "request": { + "$ref": "DisksResizeRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] } } }, @@ -7801,7 +8317,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7922,7 +8438,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8086,7 +8602,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8282,7 +8798,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8426,7 +8942,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8511,7 +9027,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8620,7 +9136,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8764,7 +9280,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8980,7 +9496,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9196,6 +9712,40 @@ "https://www.googleapis.com/auth/compute.readonly" ] }, + "getFromFamily": { + "id": "compute.images.getFromFamily", + "path": "{project}/global/images/family/{family}", + "httpMethod": "GET", + "description": "Returns the latest image that is part of an image family and is not deprecated.", + "parameters": { + "family": { + "type": "string", + "description": "Name of the image resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "family" + ], + "response": { + "$ref": "Image" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "insert": { "id": "compute.images.insert", "path": "{project}/global/images", @@ -9231,11 +9781,11 @@ "id": "compute.images.list", "path": "{project}/global/images", "httpMethod": "GET", - "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
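The compute.images.getFromFamily method added above resolves an image family to its newest non-deprecated member, and compute.images.list only returns a project's own images, so public images are listed against their hosting project (for example debian-cloud). A sketch of both calls, assuming a *compute.Service built as in the earlier example; the "debian-8" family name is illustrative:

package example

import compute "google.golang.org/api/compute/v1"

func latestDebian(svc *compute.Service) (*compute.Image, error) {
	// Public images live in their vendor's project, not yours: list them there.
	imgs, err := svc.Images.List("debian-cloud").Do()
	if err != nil {
		return nil, err
	}
	for _, img := range imgs.Items {
		_ = img.Name // e.g. a dated debian-8 image name
	}

	// Or skip the listing and let the API pick the newest non-deprecated
	// image in the family.
	return svc.Images.GetFromFamily("debian-cloud", "debian-8").Do()
}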
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9326,7 +9876,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9527,7 +10077,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9838,7 +10388,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9997,7 +10547,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10049,7 +10599,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "instanceGroup": { @@ -10294,7 +10844,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10394,7 +10944,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10757,7 +11307,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11116,6 +11666,50 @@ "https://www.googleapis.com/auth/compute" ] }, + "startWithEncryptionKey": { + "id": "compute.instances.startWithEncryptionKey", + "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + "httpMethod": "POST", + "description": "Starts an instance that was stopped using the using the instances().stop method. For more information, see Restart an instance.", + "parameters": { + "instance": { + "type": "string", + "description": "Name of the instance resource to start.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "zone": { + "type": "string", + "description": "The name of the zone for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "zone", + "instance" + ], + "request": { + "$ref": "InstancesStartWithEncryptionKeyRequest" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "stop": { "id": "compute.instances.stop", "path": "{project}/zones/{zone}/instances/{instance}/stop", @@ -11207,7 +11801,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
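The new compute.instances.startWithEncryptionKey method above restarts an instance that was stopped with instances().stop when its disks use customer-supplied encryption keys, which have to be resupplied in the request body. A rough sketch, assuming the generated Go client and placeholder project, zone, instance, disk, and key values:

package example

import compute "google.golang.org/api/compute/v1"

func startEncrypted(svc *compute.Service) (*compute.Operation, error) {
	req := &compute.InstancesStartWithEncryptionKeyRequest{
		Disks: []*compute.CustomerEncryptionKeyProtectedDisk{
			{
				// Full or partial URL of the attached disk (placeholder value).
				Source: "zones/us-central1-f/disks/example-disk",
				DiskEncryptionKey: &compute.CustomerEncryptionKey{
					// Base64-encoded key; this placeholder is not a valid 256-bit key.
					RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
				},
			},
		},
	}
	return svc.Instances.StartWithEncryptionKey(
		"my-project", "us-central1-f", "example-instance", req).Do()
}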
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11294,7 +11888,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11446,7 +12040,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11720,7 +12314,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11811,7 +12405,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11850,6 +12444,357 @@ } } }, + "routers": { + "methods": { + "aggregatedList": { + "id": "compute.routers.aggregatedList", + "path": "{project}/aggregated/routers", + "httpMethod": "GET", + "description": "Retrieves an aggregated list of routers.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + } + }, + "parameterOrder": [ + "project" + ], + "response": { + "$ref": "RouterAggregatedList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "delete": { + "id": "compute.routers.delete", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "DELETE", + "description": "Deletes the specified Router resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to delete.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "get": { + "id": "compute.routers.get", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "GET", + "description": "Returns the specified Router resource. 
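maxResults and pageToken, which appear on every list and aggregatedList method in this surface, implement ordinary token-based paging: each response carries a nextPageToken to feed into the next request until it comes back empty. A minimal paging loop, with placeholder project and zone names:

package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listAllInstances(svc *compute.Service) error {
	pageToken := ""
	for {
		call := svc.Instances.List("my-project", "us-central1-f").MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		page, err := call.Do()
		if err != nil {
			return err
		}
		for _, inst := range page.Items {
			fmt.Println(inst.Name)
		}
		if page.NextPageToken == "" {
			return nil
		}
		pageToken = page.NextPageToken
	}
}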
Get a list of available routers by making a list() request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to return.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "response": { + "$ref": "Router" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "getRouterStatus": { + "id": "compute.routers.getRouterStatus", + "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + "httpMethod": "GET", + "description": "Retrieves runtime information of the specified router.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to query.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "response": { + "$ref": "RouterStatusResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "insert": { + "id": "compute.routers.insert", + "path": "{project}/regions/{region}/routers", + "httpMethod": "POST", + "description": "Creates a Router resource in the specified project and region using the data included in the request.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "request": { + "$ref": "Router" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "list": { + "id": "compute.routers.list", + "path": "{project}/regions/{region}/routers", + "httpMethod": "GET", + "description": "Retrieves a list of Router resources available to the specified project.", + "parameters": { + "filter": { + "type": "string", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
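The new routers collection follows the same shape as the other regional resources: insert, patch, and update take a Router body and return an Operation, while getRouterStatus exposes runtime state. A sketch of creating a minimal router and reading its status back, assuming the generated Go client exposes this surface as described; the names, region, network, and ASN are placeholders:

package example

import compute "google.golang.org/api/compute/v1"

func createRouter(svc *compute.Service) (*compute.RouterStatusResponse, error) {
	router := &compute.Router{
		Name:    "example-router",
		Network: "global/networks/default",
		Bgp:     &compute.RouterBgp{Asn: 64514},
	}
	if _, err := svc.Routers.Insert("my-project", "us-central1", router).Do(); err != nil {
		return nil, err
	}
	// Insert returns an Operation; a real caller would wait for it to
	// complete before querying runtime state.
	return svc.Routers.GetRouterStatus("my-project", "us-central1", "example-router").Do()
}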
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "location": "query" + }, + "maxResults": { + "type": "integer", + "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + "default": "500", + "format": "uint32", + "minimum": "0", + "maximum": "500", + "location": "query" + }, + "pageToken": { + "type": "string", + "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + "location": "query" + }, + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region" + ], + "response": { + "$ref": "RouterList" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, + "patch": { + "id": "compute.routers.patch", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "PATCH", + "description": "Updates the entire content of the Router resource. 
This method supports patch semantics.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "request": { + "$ref": "Router" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, + "update": { + "id": "compute.routers.update", + "path": "{project}/regions/{region}/routers/{router}", + "httpMethod": "PUT", + "description": "Updates the entire content of the Router resource.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to update.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "request": { + "$ref": "Router" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + } + } + }, "routes": { "methods": { "delete": { @@ -11955,7 +12900,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12071,7 +13016,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12215,7 +13160,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12264,7 +13209,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12346,7 +13291,7 @@ "id": "compute.subnetworks.get", "path": "{project}/regions/{region}/subnetworks/{subnetwork}", "httpMethod": "GET", - "description": "Returns the specified subnetwork. Get a list of available subnetworks by making a list() request.", + "description": "Returns the specified subnetwork. Get a list of available subnetworks list() request.", "parameters": { "project": { "type": "string", @@ -12428,7 +13373,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12580,7 +13525,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12760,7 +13705,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12881,7 +13826,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13045,7 +13990,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13190,7 +14135,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13399,7 +14344,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13594,7 +14539,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13758,7 +14703,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13902,6 +14847,42 @@ "https://www.googleapis.com/auth/compute" ] }, + "invalidateCache": { + "id": "compute.urlMaps.invalidateCache", + "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + "httpMethod": "POST", + "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "urlMap": { + "type": "string", + "description": "Name of the UrlMap scoping this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "urlMap" + ], + "request": { + "$ref": "CacheInvalidationRule" + }, + "response": { + "$ref": "Operation" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute" + ] + }, "list": { "id": "compute.urlMaps.list", "path": "{project}/global/urlMaps", @@ -13910,7 +14891,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14067,7 +15048,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14231,7 +15212,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14368,7 +15349,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14459,7 +15440,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 9bd95d00f..5a16aa354 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -7,7 +7,7 @@ // import "google.golang.org/api/compute/v1" // ... // computeService, err := compute.New(oauthHttpClient) -package compute +package compute // import "google.golang.org/api/compute/v1" import ( "bytes" @@ -94,6 +94,7 @@ func New(client *http.Client) (*Service, error) { s.Projects = NewProjectsService(s) s.RegionOperations = NewRegionOperationsService(s) s.Regions = NewRegionsService(s) + s.Routers = NewRoutersService(s) s.Routes = NewRoutesService(s) s.Snapshots = NewSnapshotsService(s) s.SslCertificates = NewSslCertificatesService(s) @@ -161,6 +162,8 @@ type Service struct { Regions *RegionsService + Routers *RoutersService + Routes *RoutesService Snapshots *SnapshotsService @@ -402,6 +405,15 @@ type RegionsService struct { s *Service } +func NewRoutersService(s *Service) *RoutersService { + rs := &RoutersService{s: s} + return rs +} + +type RoutersService struct { + s *Service +} + func NewRoutesService(s *Service) *RoutesService { rs := &RoutesService{s: s} return rs @@ -837,6 +849,29 @@ type AttachedDisk struct { // persistent disks. DeviceName string `json:"deviceName,omitempty"` + // DiskEncryptionKey: Encrypts or decrypts a disk using a + // customer-supplied encryption key. + // + // If you are creating a new disk, this field encrypts the new disk + // using an encryption key that you provide. If you are attaching an + // existing disk that is already encrypted, this field decrypts the disk + // using the customer-supplied encryption key. + // + // If you encrypt a disk using a customer-supplied key, you must provide + // the same key again when you attempt to use this resource at a later + // time. For example, you must provide the key when you create a + // snapshot or an image from the disk or when you attach the disk to a + // virtual machine instance. + // + // If you do not provide an encryption key, then the disk will be + // encrypted using an automatically generated key and you do not need to + // provide a key to use the disk later. + // + // Instance templates do not store customer-supplied encryption keys, so + // you cannot use your own keys to encrypt disks in a managed instance + // group. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // Index: Assigns a zero-based index to this disk, where 0 is reserved // for the boot disk. For example, if you have many disks attached to an // instance, each disk would have a unique index number. If not @@ -940,24 +975,43 @@ type AttachedDiskInitializeParams struct { // - zones/zone/diskTypes/diskType DiskType string `json:"diskType,omitempty"` - // SourceImage: A source image used to create the disk. You can provide - // a private (custom) image, and Compute Engine will use the - // corresponding image from your project. For - // example: + // SourceImage: The source image used to create this disk. If the source + // image is deleted, this field will not be set. + // + // To create a disk with one of the public operating system images, + // specify the image by its family name. 
For example, specify + // family/debian-8 to use the latest Debian 8 + // image: + // + // projects/debian-cloud/global/images/family/debian-8 + // + // Alternatively, use a specific version of a public operating system + // image: + // + // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD + // + // To create a disk with a private image that you created, specify the + // image name in the following format: // // global/images/my-private-image // - // Or you can provide an image from a publicly-available project. For - // example, to use a Debian image from the debian-cloud project, make - // sure to include the project in the - // URL: + // You can also specify a private image by its image family, which + // returns the latest version of the image in that family. Replace the + // image name with + // family/family-name: // - // projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD - // - // where vYYYYMMDD is the image version. The fully-qualified URL will - // also work in both cases. + // global/images/family/my-private-family SourceImage string `json:"sourceImage,omitempty"` + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + // + // Instance templates do not store customer-supplied encryption keys, so + // you cannot create disks for instances in a managed instance group if + // the source images are encrypted with your own keys. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // ForceSendFields is a list of field names (e.g. "DiskName") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, @@ -1472,6 +1526,9 @@ type BackendService struct { // property when you create the resource. Description string `json:"description,omitempty"` + // EnableCDN: If true, enable Cloud CDN for this BackendService. + EnableCDN bool `json:"enableCDN,omitempty"` + // Fingerprint: Fingerprint of this resource. A hash of the contents // stored in this object. This field is used in optimistic locking. This // field will be ignored when inserting a BackendService. An up-to-date @@ -1519,6 +1576,11 @@ type BackendService struct { // "HTTPS" Protocol string `json:"protocol,omitempty"` + // Region: [Output Only] URL of the region where the regional backend + // service resides. This field is not applicable to global backend + // services. + Region string `json:"region,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -1573,8 +1635,8 @@ func (s *BackendServiceGroupHealth) MarshalJSON() ([]byte, error) { // BackendServiceList: Contains a list of BackendService resources. type BackendServiceList struct { - // Id: [Output Only] The unique identifier for the resource. This - // identifier is defined by the server. + // Id: [Output Only] Unique identifier for the resource; defined by the + // server. Id string `json:"id,omitempty"` // Items: A list of BackendService resources. @@ -1584,12 +1646,8 @@ type BackendServiceList struct { // compute#backendServiceList for lists of backend services. Kind string `json:"kind,omitempty"` - // NextPageToken: [Output Only] This token allows you to get the next - // page of results for list requests. 
If the number of results is larger - // than maxResults, use the nextPageToken as a value for the query - // parameter pageToken in the next list request. Subsequent list - // requests will have their own nextPageToken to continue paging through - // the results. + // NextPageToken: [Output Only] A token used to continue a truncated + // list request. NextPageToken string `json:"nextPageToken,omitempty"` // SelfLink: [Output Only] Server-defined URL for this resource. @@ -1614,6 +1672,74 @@ func (s *BackendServiceList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +type CacheInvalidationRule struct { + Path string `json:"path,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Path") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CacheInvalidationRule) MarshalJSON() ([]byte, error) { + type noMethod CacheInvalidationRule + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// CustomerEncryptionKey: Represents a customer-supplied encryption key +type CustomerEncryptionKey struct { + // RawKey: Specifies a 256-bit customer-supplied encryption key, encoded + // in RFC 4648 base64 to either encrypt or decrypt this resource. + RawKey string `json:"rawKey,omitempty"` + + // Sha256: [Output only] The RFC 4648 base64 encoded SHA-256 hash of the + // customer-supplied encryption key that protects this resource. + Sha256 string `json:"sha256,omitempty"` + + // ForceSendFields is a list of field names (e.g. "RawKey") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *CustomerEncryptionKey) MarshalJSON() ([]byte, error) { + type noMethod CustomerEncryptionKey + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type CustomerEncryptionKeyProtectedDisk struct { + // DiskEncryptionKey: Decrypts data associated with the disk with a + // customer-supplied encryption key. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + + // Source: Specifies a valid partial or full URL to an existing + // Persistent Disk resource. This field is only applicable for + // persistent disks. + Source string `json:"source,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DiskEncryptionKey") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. 
+ ForceSendFields []string `json:"-"` +} + +func (s *CustomerEncryptionKeyProtectedDisk) MarshalJSON() ([]byte, error) { + type noMethod CustomerEncryptionKeyProtectedDisk + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + // DeprecationStatus: Deprecation status for a public resource. type DeprecationStatus struct { // Deleted: An optional RFC3339 timestamp on or after which the @@ -1671,6 +1797,22 @@ type Disk struct { // property when you create the resource. Description string `json:"description,omitempty"` + // DiskEncryptionKey: Encrypts the disk using a customer-supplied + // encryption key. + // + // After you encrypt a disk with a customer-supplied key, you must + // provide the same key if you use the disk later (e.g. to create a disk + // snapshot or an image, or to attach the disk to a virtual + // machine). + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. + // + // If you do not provide an encryption key when creating the disk, then + // the disk will be encrypted using an automatically generated key and + // you do not need to provide a key to use the disk later. + DiskEncryptionKey *CustomerEncryptionKey `json:"diskEncryptionKey,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` @@ -1717,39 +1859,38 @@ type Disk struct { SizeGb int64 `json:"sizeGb,omitempty,string"` // SourceImage: The source image used to create this disk. If the source - // image is deleted from the system, this field will not be set, even if - // an image with the same name has been re-created. + // image is deleted, this field will not be set. // - // When creating a disk, you can provide a private (custom) image using - // the following input, and Compute Engine will use the corresponding - // image from your project. For example: + // To create a disk with one of the public operating system images, + // specify the image by its family name. For example, specify + // family/debian-8 to use the latest Debian 8 + // image: + // + // projects/debian-cloud/global/images/family/debian-8 + // + // Alternatively, use a specific version of a public operating system + // image: + // + // projects/debian-cloud/global/images/debian-8-jessie-vYYYYMMDD + // + // To create a disk with a private image that you created, specify the + // image name in the following format: // // global/images/my-private-image // - // Or you can provide an image from a publicly-available project. For - // example, to use a Debian image from the debian-cloud project, make - // sure to include the project in the - // URL: - // - // projects/debian-cloud/global/images/debian-7-wheezy-vYYYYMMDD - // - // where vYYYYMMDD is the image version. The fully-qualified URL will - // also work in both cases. - // - // You can also specify the latest image for a private image family by - // replacing the image name suffix with family/family-name. For - // example: + // You can also specify a private image by its image family, which + // returns the latest version of the image in that family. Replace the + // image name with + // family/family-name: // // global/images/family/my-private-family - // - // Or you can specify an image family from a publicly-available project. 
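// Illustrative sketch (not part of the generated file): populating the new
// Disk encryption and image-family fields described above. The key below is a
// placeholder 256-bit value encoded with RFC 4648 base64; the project, zone
// and disk names are invented for the example, and the Disks.Insert call is
// assumed to follow the same pattern as the other generated insert methods.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	disk := &compute.Disk{
		Name:   "encrypted-disk",
		SizeGb: 50,
		// Latest non-deprecated image from a public image family.
		SourceImage: "projects/debian-cloud/global/images/family/debian-8",
		// Customer-supplied key; the same key must be supplied whenever the
		// disk is used later (snapshots, images, attaching to an instance).
		DiskEncryptionKey: &compute.CustomerEncryptionKey{
			RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		},
	}

	op, err := svc.Disks.Insert("my-project", "us-central1-f", disk).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("insert operation: %s", op.Name)
}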
- // For example, to use the latest Debian 7 from the debian-cloud - // project, make sure to include the project in the - // URL: - // - // projects/debian-cloud/global/images/family/debian-7 SourceImage string `json:"sourceImage,omitempty"` + // SourceImageEncryptionKey: The customer-supplied encryption key of the + // source image. Required if the source image is protected by a + // customer-supplied encryption key. + SourceImageEncryptionKey *CustomerEncryptionKey `json:"sourceImageEncryptionKey,omitempty"` + // SourceImageId: [Output Only] The ID value of the image used to create // this disk. This value identifies the exact image that was used to // create this persistent disk. For example, if you created the @@ -1767,6 +1908,11 @@ type Disk struct { // - global/snapshots/snapshot SourceSnapshot string `json:"sourceSnapshot,omitempty"` + // SourceSnapshotEncryptionKey: The customer-supplied encryption key of + // the source snapshot. Required if the source snapshot is protected by + // a customer-supplied encryption key. + SourceSnapshotEncryptionKey *CustomerEncryptionKey `json:"sourceSnapshotEncryptionKey,omitempty"` + // SourceSnapshotId: [Output Only] The unique ID of the snapshot used to // create this disk. This value identifies the exact snapshot that was // used to create this persistent disk. For example, if you created the @@ -2177,6 +2323,26 @@ func (s *DiskTypesScopedListWarningData) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +type DisksResizeRequest struct { + // SizeGb: The new size of the persistent disk, which is specified in + // GB. + SizeGb int64 `json:"sizeGb,omitempty,string"` + + // ForceSendFields is a list of field names (e.g. "SizeGb") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *DisksResizeRequest) MarshalJSON() ([]byte, error) { + type noMethod DisksResizeRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + type DisksScopedList struct { // Disks: [Output Only] List of disks contained in this scope. Disks []*Disk `json:"disks,omitempty"` @@ -2343,6 +2509,11 @@ type Firewall struct { // range matches the sourceRanges OR the tag of the source matches the // sourceTags property. The connection does not need to match both // properties. + // + // Source tags cannot be used to allow access to an instance's external + // IP address. Because tags are associated with an instance, not an IP + // address, source tags can only be used to control traffic traveling + // from an instance inside the same network as the firewall. SourceTags []string `json:"sourceTags,omitempty"` // TargetTags: A list of instance tags indicating sets of instances @@ -2456,7 +2627,7 @@ type ForwardingRule struct { IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. Valid options - // are TCP, UDP, ESP, AH or SCTP. + // are TCP, UDP, ESP, AH, SCTP or ICMP. // // Possible values: // "AH" @@ -3074,15 +3245,36 @@ type Image struct { // (in GB). DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"` + // Family: The name of the image family to which this image belongs. 
You + // can create disks by specifying an image family instead of a specific + // image name. The image family always returns its latest image that is + // not deprecated. + Family string `json:"family,omitempty"` + // Id: [Output Only] The unique identifier for the resource. This // identifier is defined by the server. Id uint64 `json:"id,omitempty,string"` + // ImageEncryptionKey: Encrypts the image using a customer-supplied + // encryption key. + // + // After you encrypt an image with a customer-supplied key, you must + // provide the same key if you use the image later (e.g. to create a + // disk from the image). + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. + // + // If you do not provide an encryption key when creating the image, then + // the disk will be encrypted using an automatically generated key and + // you do not need to provide a key to use the image later. + ImageEncryptionKey *CustomerEncryptionKey `json:"imageEncryptionKey,omitempty"` + // Kind: [Output Only] Type of the resource. Always compute#image for // images. Kind string `json:"kind,omitempty"` - // Licenses: Any applicable publicly visible licenses. + // Licenses: Any applicable license URI. Licenses []string `json:"licenses,omitempty"` // Name: Name of the resource; provided by the client when the resource @@ -3110,6 +3302,11 @@ type Image struct { // - zones/zone/disks/disk SourceDisk string `json:"sourceDisk,omitempty"` + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + // SourceDiskId: The ID value of the disk used to create this image. // This value may be used to determine whether the image was taken from // the current or a previous instance of a given disk name. @@ -3312,13 +3509,13 @@ type Instance struct { // ServiceAccounts: A list of service accounts, with their specified // scopes, authorized for this instance. Service accounts generate // access tokens that can be accessed through the metadata server and - // used to authenticate applications on the instance. See Authenticating - // from Google Compute Engine for more information. + // used to authenticate applications on the instance. See Service + // Accounts for more information. ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"` // Status: [Output Only] The status of the instance. One of the - // following values: PROVISIONING, STAGING, RUNNING, STOPPING, and - // TERMINATED. + // following values: PROVISIONING, STAGING, RUNNING, STOPPING, + // SUSPENDED, SUSPENDING, and TERMINATED. // // Possible values: // "PROVISIONING" @@ -3571,6 +3768,7 @@ func (s *InstanceGroupList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +// InstanceGroupManager: An Instance Template Manager resource. type InstanceGroupManager struct { // BaseInstanceName: The base instance name to use for instances in this // group. The value must be 1-58 characters long. Instances are named by @@ -3591,9 +3789,9 @@ type InstanceGroupManager struct { // property when you create the resource. Description string `json:"description,omitempty"` - // Fingerprint: [Output Only] The fingerprint of the target pools - // information. You can use this optional field for optimistic locking - // when you update the target pool entries. 
+ // Fingerprint: [Output Only] The fingerprint of the resource data. You + // can use this optional field for optimistic locking when you update + // the resource. Fingerprint string `json:"fingerprint,omitempty"` // Id: [Output Only] A unique identifier for this resource type. The @@ -4362,8 +4560,8 @@ type InstanceProperties struct { // receive packets with destination IP addresses other than their own. // If these instances will be used as an IP gateway or it will be set as // the next-hop in a Route resource, specify true. If unsure, leave this - // set to false. See the canIpForward documentation for more - // information. + // set to false. See the Enable IP forwarding for instances + // documentation for more information. CanIpForward bool `json:"canIpForward,omitempty"` // Description: An optional text description for the instances that are @@ -4693,6 +4891,32 @@ func (s *InstancesSetMachineTypeRequest) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +type InstancesStartWithEncryptionKeyRequest struct { + // Disks: Array of disks associated with this instance that are + // protected with a customer-supplied encryption key. + // + // In order to start the instance, the disk url and its corresponding + // key must be provided. + // + // If the disk is not protected with a customer-supplied encryption key + // it should not be specified. + Disks []*CustomerEncryptionKeyProtectedDisk `json:"disks,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Disks") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *InstancesStartWithEncryptionKeyRequest) MarshalJSON() ([]byte, error) { + type noMethod InstancesStartWithEncryptionKeyRequest + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + // License: A license resource. type License struct { // ChargesUseFee: [Output Only] If true, the customer will be charged @@ -4756,6 +4980,10 @@ type MachineType struct { // be populated with any relevant values. ImageSpaceGb int64 `json:"imageSpaceGb,omitempty"` + // IsSharedCpu: [Output Only] Whether this machine type has a shared + // CPU. See Shared-core machine types for more information. + IsSharedCpu bool `json:"isSharedCpu,omitempty"` + // Kind: [Output Only] The type of the resource. Always // compute#machineType for machine types. Kind string `json:"kind,omitempty"` @@ -5344,8 +5572,9 @@ type NetworkInterface struct { // - global/networks/default Network string `json:"network,omitempty"` - // NetworkIP: [Output Only] An optional IPV4 internal network address - // assigned to the instance for this network interface. + // NetworkIP: An IPv4 internal network address to assign to the instance + // for this network interface. If not specified by the user, an unused + // internal IP is assigned by the system. NetworkIP string `json:"networkIP,omitempty"` // Subnetwork: The URL of the Subnetwork resource for this instance. If @@ -5356,8 +5585,8 @@ type NetworkInterface struct { // the subnetwork as a full or partial URL. 
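// Illustrative sketch: building the InstancesStartWithEncryptionKeyRequest
// shown above for an instance whose boot disk is protected by a
// customer-supplied key. The disk URL and key are placeholders; the
// Instances.StartWithEncryptionKey call that would consume this body is
// assumed to mirror the other generated instance methods and is not shown.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	req := &compute.InstancesStartWithEncryptionKeyRequest{
		Disks: []*compute.CustomerEncryptionKeyProtectedDisk{
			{
				// Full or partial URL of the already-encrypted persistent disk.
				Source: "zones/us-central1-f/disks/encrypted-boot-disk",
				// Must be the same key the disk was originally encrypted with.
				DiskEncryptionKey: &compute.CustomerEncryptionKey{
					RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
				},
			},
		},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
}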
For example, the following // are all valid URLs: // - - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/subnetworks/subnetwork - // - zones/zone/subnetworks/subnetwork + // https://www.googleapis.com/compute/v1/projects/project/regions/region/subnetworks/subnetwork + // - regions/region/subnetworks/subnetwork Subnetwork string `json:"subnetwork,omitempty"` // ForceSendFields is a list of field names (e.g. "AccessConfigs") to @@ -5458,7 +5687,7 @@ type Operation struct { InsertTime string `json:"insertTime,omitempty"` // Kind: [Output Only] Type of the resource. Always compute#operation - // for operation resources. + // for Operation resources. Kind string `json:"kind,omitempty"` // Name: [Output Only] Name of the resource. @@ -5929,6 +6158,10 @@ type Project struct { // format. CreationTimestamp string `json:"creationTimestamp,omitempty"` + // DefaultServiceAccount: [Output Only] Default service account used by + // VMs running in this project. + DefaultServiceAccount string `json:"defaultServiceAccount,omitempty"` + // Description: An optional textual description of the resource. Description string `json:"description,omitempty"` @@ -6001,6 +6234,7 @@ type Quota struct { // "IN_USE_ADDRESSES" // "LOCAL_SSD_TOTAL_GB" // "NETWORKS" + // "ROUTERS" // "ROUTES" // "SNAPSHOTS" // "SSD_TOTAL_GB" @@ -6389,6 +6623,454 @@ func (s *RouteList) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } +// Router: Router resource. +type Router struct { + Bgp *RouterBgp `json:"bgp,omitempty"` + + BgpPeers []*RouterBgpPeer `json:"bgpPeers,omitempty"` + + // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text + // format. + CreationTimestamp string `json:"creationTimestamp,omitempty"` + + // Description: An optional description of this resource. Provide this + // property when you create the resource. + Description string `json:"description,omitempty"` + + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id uint64 `json:"id,omitempty,string"` + + Interfaces []*RouterInterface `json:"interfaces,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. + Kind string `json:"kind,omitempty"` + + // Name: Name of the resource. Provided by the client when the resource + // is created. The name must be 1-63 characters long, and comply with + // RFC1035. Specifically, the name must be 1-63 characters long and + // match the regular expression [a-z]([-a-z0-9]*[a-z0-9])? which means + // the first character must be a lowercase letter, and all following + // characters must be a dash, lowercase letter, or digit, except the + // last character, which cannot be a dash. + Name string `json:"name,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // Region: [Output Only] URI of the region where the router resides. + Region string `json:"region,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Bgp") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *Router) MarshalJSON() ([]byte, error) { + type noMethod Router + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouterAggregatedList: Contains a list of routers. +type RouterAggregatedList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A map of scoped router lists. + Items map[string]RoutersScopedList `json:"items,omitempty"` + + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for this resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterAggregatedList) MarshalJSON() ([]byte, error) { + type noMethod RouterAggregatedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouterBgp struct { + // Asn: Local BGP Autonomous System Number (ASN). Must be an RFC6996 + // private ASN, either 16-bit or 32-bit. The value will be fixed for + // this router resource. All VPN tunnels that link to this router will + // have the same local ASN. + Asn int64 `json:"asn,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Asn") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterBgp) MarshalJSON() ([]byte, error) { + type noMethod RouterBgp + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouterBgpPeer: BGP information that needs to be configured into the +// routing stack to establish the BGP peering. It must specify peer ASN +// and either interface name, IP, or peer IP. Reference: +// https://tools.ietf.org/html/rfc4273 +type RouterBgpPeer struct { + // AdvertisedRoutePriority: The priority of routes advertised to this + // BGP peer. In the case where there is more than one matching route of + // maximum length, the routes with lowest priority value win. 
+ AdvertisedRoutePriority int64 `json:"advertisedRoutePriority,omitempty"` + + // InterfaceName: Name of the interface the BGP peer is associated with. + InterfaceName string `json:"interfaceName,omitempty"` + + // IpAddress: IP address of the interface inside Google Cloud Platform. + IpAddress string `json:"ipAddress,omitempty"` + + // Name: Name of this BGP peer. The name must be 1-63 characters long + // and comply with RFC1035. + Name string `json:"name,omitempty"` + + // PeerAsn: Peer BGP Autonomous System Number (ASN). For VPN use case, + // this value can be different for every tunnel. + PeerAsn int64 `json:"peerAsn,omitempty"` + + // PeerIpAddress: IP address of the BGP interface outside Google cloud. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // ForceSendFields is a list of field names (e.g. + // "AdvertisedRoutePriority") to unconditionally include in API + // requests. By default, fields with empty values are omitted from API + // requests. However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterBgpPeer) MarshalJSON() ([]byte, error) { + type noMethod RouterBgpPeer + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouterInterface: Router interfaces. Each interface requires either +// one linked resource (e.g. linked_vpn_tunnel) or IP address + range +// (specified in ip_range). +type RouterInterface struct { + // IpRange: IP address and range of the interface. The IP range must be + // in the RFC3927 link-local IP space. The value must be a + // CIDR-formatted string, for example: 169.254.0.1/30. NOTE: Do not + // truncate the address as it represents the IP address of the + // interface. + IpRange string `json:"ipRange,omitempty"` + + // LinkedVpnTunnel: URI of linked VPN tunnel. It must be in the same + // region as the router. Each interface can have at most one linked + // resource. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this interface entry. The name must be 1-63 characters + // long and comply with RFC1035. + Name string `json:"name,omitempty"` + + // ForceSendFields is a list of field names (e.g. "IpRange") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterInterface) MarshalJSON() ([]byte, error) { + type noMethod RouterInterface + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RouterList: Contains a list of Router resources. +type RouterList struct { + // Id: [Output Only] The unique identifier for the resource. This + // identifier is defined by the server. + Id string `json:"id,omitempty"` + + // Items: A list of Router resources. + Items []*Router `json:"items,omitempty"` + + // Kind: [Output Only] Type of resource. Always compute#router for + // routers. + Kind string `json:"kind,omitempty"` + + // NextPageToken: [Output Only] This token allows you to get the next + // page of results for list requests. 
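// Illustrative sketch: assembling the new Router resource described above,
// with one interface linked to a VPN tunnel and one BGP peer. The names, ASN
// values and addresses are placeholders; the Routers.Insert call that would
// send this body is assumed to mirror the other generated insert methods.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	router := &compute.Router{
		Name:    "my-router",
		Network: "global/networks/default",
		// Local ASN must be an RFC6996 private ASN and is shared by all VPN
		// tunnels that link to this router.
		Bgp: &compute.RouterBgp{Asn: 64512},
		Interfaces: []*compute.RouterInterface{
			{
				Name: "if-tunnel-1",
				// Link-local /30 for the BGP session; do not truncate the address.
				IpRange:         "169.254.0.1/30",
				LinkedVpnTunnel: "regions/us-central1/vpnTunnels/tunnel-1",
			},
		},
		BgpPeers: []*compute.RouterBgpPeer{
			{
				Name:          "peer-1",
				InterfaceName: "if-tunnel-1",
				IpAddress:     "169.254.0.1",
				PeerIpAddress: "169.254.0.2",
				PeerAsn:       64513,
				// Lower values win when several advertised routes have the
				// same maximum length.
				AdvertisedRoutePriority: 100,
			},
		},
	}
	b, _ := json.MarshalIndent(router, "", "  ")
	fmt.Println(string(b))
}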
If the number of results is larger + // than maxResults, use the nextPageToken as a value for the query + // parameter pageToken in the next list request. Subsequent list + // requests will have their own nextPageToken to continue paging through + // the results. + NextPageToken string `json:"nextPageToken,omitempty"` + + // SelfLink: [Output Only] Server-defined URL for the resource. + SelfLink string `json:"selfLink,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Id") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterList) MarshalJSON() ([]byte, error) { + type noMethod RouterList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouterStatus struct { + // BestRoutes: Best routes for this router's network. + BestRoutes []*Route `json:"bestRoutes,omitempty"` + + BgpPeerStatus []*RouterStatusBgpPeerStatus `json:"bgpPeerStatus,omitempty"` + + // Network: URI of the network to which this router belongs. + Network string `json:"network,omitempty"` + + // ForceSendFields is a list of field names (e.g. "BestRoutes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouterStatusBgpPeerStatus struct { + // AdvertisedRoutes: Routes that were advertised to the remote BGP peer + AdvertisedRoutes []*Route `json:"advertisedRoutes,omitempty"` + + // IpAddress: IP address of the local BGP interface. + IpAddress string `json:"ipAddress,omitempty"` + + // LinkedVpnTunnel: URL of the VPN tunnel that this BGP peer controls. + LinkedVpnTunnel string `json:"linkedVpnTunnel,omitempty"` + + // Name: Name of this BGP peer. Unique within the Routers resource. + Name string `json:"name,omitempty"` + + // NumLearnedRoutes: Number of routes learned from the remote BGP Peer. + NumLearnedRoutes int64 `json:"numLearnedRoutes,omitempty"` + + // PeerIpAddress: IP address of the remote BGP interface. + PeerIpAddress string `json:"peerIpAddress,omitempty"` + + // State: BGP state as specified in RFC1771. + State string `json:"state,omitempty"` + + // Status: Status of the BGP peer: {UP, DOWN} + // + // Possible values: + // "DOWN" + // "UNKNOWN" + // "UP" + Status string `json:"status,omitempty"` + + // Uptime: Time this session has been up. Format: 14 years, 51 weeks, 6 + // days, 23 hours, 59 minutes, 59 seconds + Uptime string `json:"uptime,omitempty"` + + // UptimeSeconds: Time this session has been up, in seconds. Format: 145 + UptimeSeconds string `json:"uptimeSeconds,omitempty"` + + // ForceSendFields is a list of field names (e.g. 
"AdvertisedRoutes") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterStatusBgpPeerStatus) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusBgpPeerStatus + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RouterStatusResponse struct { + // Kind: Type of resource. + Kind string `json:"kind,omitempty"` + + Result *RouterStatus `json:"result,omitempty"` + + // ServerResponse contains the HTTP response code and headers from the + // server. + googleapi.ServerResponse `json:"-"` + + // ForceSendFields is a list of field names (e.g. "Kind") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RouterStatusResponse) MarshalJSON() ([]byte, error) { + type noMethod RouterStatusResponse + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RoutersScopedList struct { + // Routers: List of routers contained in this scope. + Routers []*Router `json:"routers,omitempty"` + + // Warning: Informational warning which replaces the list of routers + // when the list is empty. + Warning *RoutersScopedListWarning `json:"warning,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Routers") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RoutersScopedList) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedList + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +// RoutersScopedListWarning: Informational warning which replaces the +// list of routers when the list is empty. +type RoutersScopedListWarning struct { + // Code: [Output Only] A warning code, if applicable. For example, + // Compute Engine returns NO_RESULTS_ON_PAGE if there are no results in + // the response. + // + // Possible values: + // "CLEANUP_FAILED" + // "DEPRECATED_RESOURCE_USED" + // "DISK_SIZE_LARGER_THAN_IMAGE_SIZE" + // "INJECTED_KERNELS_DEPRECATED" + // "NEXT_HOP_ADDRESS_NOT_ASSIGNED" + // "NEXT_HOP_CANNOT_IP_FORWARD" + // "NEXT_HOP_INSTANCE_NOT_FOUND" + // "NEXT_HOP_INSTANCE_NOT_ON_NETWORK" + // "NEXT_HOP_NOT_RUNNING" + // "NOT_CRITICAL_ERROR" + // "NO_RESULTS_ON_PAGE" + // "REQUIRED_TOS_AGREEMENT" + // "RESOURCE_NOT_DELETED" + // "SINGLE_INSTANCE_PROPERTY_TEMPLATE" + // "UNREACHABLE" + Code string `json:"code,omitempty"` + + // Data: [Output Only] Metadata about this warning in key: value format. 
+ // For example: + // "data": [ { "key": "scope", "value": "zones/us-east1-d" } + Data []*RoutersScopedListWarningData `json:"data,omitempty"` + + // Message: [Output Only] A human-readable description of the warning + // code. + Message string `json:"message,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Code") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RoutersScopedListWarning) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarning + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + +type RoutersScopedListWarningData struct { + // Key: [Output Only] A key that provides more detail on the warning + // being returned. For example, for warnings where there are no results + // in a list request for a particular zone, this key might be scope and + // the key value might be the zone name. Other examples might be a key + // indicating a deprecated resource and a suggested replacement, or a + // warning about invalid network settings (for example, if an instance + // attempts to perform IP forwarding but is not enabled for IP + // forwarding). + Key string `json:"key,omitempty"` + + // Value: [Output Only] A warning data value corresponding to the key. + Value string `json:"value,omitempty"` + + // ForceSendFields is a list of field names (e.g. "Key") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` +} + +func (s *RoutersScopedListWarningData) MarshalJSON() ([]byte, error) { + type noMethod RoutersScopedListWarningData + raw := noMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields) +} + // Scheduling: Sets the scheduling options for an Instance. type Scheduling struct { // AutomaticRestart: Specifies whether the instance should be @@ -6520,10 +7202,31 @@ type Snapshot struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SnapshotEncryptionKey: Encrypts the snapshot using a + // customer-supplied encryption key. + // + // After you encrypt a snapshot using a customer-supplied key, you must + // provide the same key if you use the image later For example, you must + // provide the encryption key when you create a disk from the encrypted + // snapshot in a future request. + // + // Customer-supplied encryption keys do not protect access to metadata + // of the disk. + // + // If you do not provide an encryption key when creating the snapshot, + // then the snapshot will be encrypted using an automatically generated + // key and you do not need to provide a key to use the snapshot later. + SnapshotEncryptionKey *CustomerEncryptionKey `json:"snapshotEncryptionKey,omitempty"` + // SourceDisk: [Output Only] The source disk used to create this // snapshot. 
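// Illustrative sketch: snapshotting a disk that is protected by a
// customer-supplied key. Per the field docs above, the source disk's key must
// be supplied, and the snapshot itself can carry its own key. The
// Disks.CreateSnapshot call is assumed to follow the same pattern as the
// other generated disk methods; names and keys are placeholders (the two keys
// would normally be distinct values).
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	snap := &compute.Snapshot{
		Name: "encrypted-disk-snap",
		// Key that protects the source disk; required because the disk is
		// encrypted with a customer-supplied key.
		SourceDiskEncryptionKey: &compute.CustomerEncryptionKey{
			RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		},
		// Optional key for the snapshot itself; it must be supplied again
		// whenever the snapshot is used, e.g. to create a disk from it.
		SnapshotEncryptionKey: &compute.CustomerEncryptionKey{
			RawKey: "SGVsbG8gZnJvbSBHb29nbGUgQ2xvdWQgUGxhdGZvcm0=",
		},
	}

	op, err := svc.Disks.CreateSnapshot("my-project", "us-central1-f", "encrypted-disk", snap).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("snapshot operation: %s", op.Name)
}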
SourceDisk string `json:"sourceDisk,omitempty"` + // SourceDiskEncryptionKey: The customer-supplied encryption key of the + // source disk. Required if the source disk is protected by a + // customer-supplied encryption key. + SourceDiskEncryptionKey *CustomerEncryptionKey `json:"sourceDiskEncryptionKey,omitempty"` + // SourceDiskId: [Output Only] The ID value of the disk used to create // this snapshot. This value may be used to determine whether the // snapshot was taken from the current or a previous instance of a given @@ -6762,7 +7465,7 @@ type Subnetwork struct { // networks that are in the distributed mode can have subnetworks. Network string `json:"network,omitempty"` - // Region: [Output Only] URL of the region where the Subnetwork resides. + // Region: URL of the region where the Subnetwork resides. Region string `json:"region,omitempty"` // SelfLink: [Output Only] Server-defined URL for the resource. @@ -8258,7 +8961,7 @@ type UrlMap struct { SelfLink string `json:"selfLink,omitempty"` // Tests: The list of expected URL mappings. Request to update this - // UrlMap will succeed only all of the test cases pass. + // UrlMap will succeed only if all of the test cases pass. Tests []*UrlMapTest `json:"tests,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -8524,6 +9227,15 @@ type VpnTunnel struct { // Region: [Output Only] URL of the region where the VPN tunnel resides. Region string `json:"region,omitempty"` + // RemoteTrafficSelector: Remote traffic selectors to use when + // establishing the VPN tunnel with peer VPN gateway. The value should + // be a CIDR formatted string, for example: 192.168.0.0/16. The ranges + // should be disjoint. + RemoteTrafficSelector []string `json:"remoteTrafficSelector,omitempty"` + + // Router: URL of router resource to be used for dynamic routing. + Router string `json:"router,omitempty"` + // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` @@ -8781,12 +9493,6 @@ type Zone struct { // zones. Kind string `json:"kind,omitempty"` - // MaintenanceWindows: [Output Only] Any scheduled maintenance windows - // for this zone. When the zone is in a maintenance window, all - // resources which reside in the zone will be unavailable. For more - // information, see Maintenance Windows - MaintenanceWindows []*ZoneMaintenanceWindows `json:"maintenanceWindows,omitempty"` - // Name: [Output Only] Name of the resource. Name string `json:"name,omitempty"` @@ -8823,37 +9529,6 @@ func (s *Zone) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } -type ZoneMaintenanceWindows struct { - // BeginTime: [Output Only] Starting time of the maintenance window, in - // RFC3339 format. - BeginTime string `json:"beginTime,omitempty"` - - // Description: [Output Only] Textual description of the maintenance - // window. - Description string `json:"description,omitempty"` - - // EndTime: [Output Only] Ending time of the maintenance window, in - // RFC3339 format. - EndTime string `json:"endTime,omitempty"` - - // Name: [Output Only] Name of the maintenance window. - Name string `json:"name,omitempty"` - - // ForceSendFields is a list of field names (e.g. "BeginTime") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. 
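// Illustrative sketch: the new VpnTunnel fields described above, pointing the
// tunnel at a router for dynamic routing and restricting the remote traffic
// selectors. Only fields introduced in this change are populated; the
// remaining tunnel fields (peer IP, shared secret, target gateway) and the
// VpnTunnels.Insert call are omitted here, and the resource names are
// placeholders.
package main

import (
	"encoding/json"
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	tunnel := &compute.VpnTunnel{
		// Router used for dynamic (BGP) routing instead of static routes.
		Router: "regions/us-central1/routers/my-router",
		// Disjoint CIDR ranges reachable through the peer VPN gateway.
		RemoteTrafficSelector: []string{"192.168.0.0/16", "10.240.0.0/16"},
	}
	b, _ := json.MarshalIndent(tunnel, "", "  ")
	fmt.Println(string(b))
}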
This may be - // used to include empty fields in Patch requests. - ForceSendFields []string `json:"-"` -} - -func (s *ZoneMaintenanceWindows) MarshalJSON() ([]byte, error) { - type noMethod ZoneMaintenanceWindows - raw := noMethod(*s) - return gensupport.MarshalJSON(raw, s.ForceSendFields) -} - // ZoneList: Contains a list of zone resources. type ZoneList struct { // Id: [Output Only] Unique identifier for the resource; defined by the @@ -8932,12 +9607,11 @@ func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedLi // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -8994,22 +9668,21 @@ func (c *AddressesAggregatedListCall) Context(ctx context.Context) *AddressesAgg } func (c *AddressesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.addresses.aggregatedList" call. @@ -9044,7 +9717,8 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9057,7 +9731,7 @@ func (c *AddressesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Address // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -9155,21 +9829,20 @@ func (c *AddressesDeleteCall) Context(ctx context.Context) *AddressesDeleteCall } func (c *AddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "address": c.address, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.addresses.delete" call. 
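// Illustrative sketch: using the filter expression documented above with the
// generated Addresses list call. The List method and Do are shown in this
// file; the Filter setter on the call is assumed to exist as it does for the
// other generated list calls, and the project and region names are
// placeholders.
package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()
	client, err := google.DefaultClient(ctx, compute.ComputeScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// field_name comparison_string literal_string: keep every address whose
	// name does not match the RE2 expression "example-address".
	list, err := svc.Addresses.List("my-project", "us-central1").
		Filter("name ne example-address").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, addr := range list.Items {
		log.Println(addr.Name)
	}
}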
@@ -9204,7 +9877,8 @@ func (c *AddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9301,24 +9975,23 @@ func (c *AddressesGetCall) Context(ctx context.Context) *AddressesGetCall { } func (c *AddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "address": c.address, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.addresses.get" call. @@ -9353,7 +10026,8 @@ func (c *AddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9441,26 +10115,24 @@ func (c *AddressesInsertCall) Context(ctx context.Context) *AddressesInsertCall } func (c *AddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.addresses.insert" call. @@ -9495,7 +10167,8 @@ func (c *AddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9577,12 +10250,11 @@ func (r *AddressesService) List(project string, region string) *AddressesListCal // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -9639,23 +10311,22 @@ func (c *AddressesListCall) Context(ctx context.Context) *AddressesListCall { } func (c *AddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.addresses.list" call. @@ -9690,7 +10361,8 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9704,7 +10376,7 @@ func (c *AddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList, erro // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -9806,12 +10478,11 @@ func (r *AutoscalersService) AggregatedList(project string) *AutoscalersAggregat // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -9868,22 +10539,21 @@ func (c *AutoscalersAggregatedListCall) Context(ctx context.Context) *Autoscaler } func (c *AutoscalersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/autoscalers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.aggregatedList" call. @@ -9918,7 +10588,8 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -9931,7 +10602,7 @@ func (c *AutoscalersAggregatedListCall) Do(opts ...googleapi.CallOption) (*Autos // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -10028,21 +10699,20 @@ func (c *AutoscalersDeleteCall) Context(ctx context.Context) *AutoscalersDeleteC } func (c *AutoscalersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "autoscaler": c.autoscaler, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.delete" call. @@ -10077,7 +10747,8 @@ func (c *AutoscalersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10174,24 +10845,23 @@ func (c *AutoscalersGetCall) Context(ctx context.Context) *AutoscalersGetCall { } func (c *AutoscalersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers/{autoscaler}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "autoscaler": c.autoscaler, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.get" call. 
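Every doRequest hunk in this patch swaps the inline branch on c.ctx_ for gensupport.SendRequest; the sketch below approximates the behaviour being centralized (it is not the library source, and the real helper may also handle cancellation), mirroring the branch the removed lines show.

package sketch

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// sendRequest mirrors the logic deleted from each doRequest: use the
// context-aware ctxhttp.Do when a context was attached, else client.Do.
func sendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx == nil {
		return client.Do(req)
	}
	return ctxhttp.Do(ctx, client, req)
}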
@@ -10226,7 +10896,8 @@ func (c *AutoscalersGetCall) Do(opts ...googleapi.CallOption) (*Autoscaler, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10313,26 +10984,24 @@ func (c *AutoscalersInsertCall) Context(ctx context.Context) *AutoscalersInsertC } func (c *AutoscalersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.insert" call. @@ -10367,7 +11036,8 @@ func (c *AutoscalersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10448,12 +11118,11 @@ func (r *AutoscalersService) List(project string, zone string) *AutoscalersListC // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -10510,23 +11179,22 @@ func (c *AutoscalersListCall) Context(ctx context.Context) *AutoscalersListCall } func (c *AutoscalersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.list" call. @@ -10561,7 +11229,8 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10575,7 +11244,7 @@ func (c *AutoscalersListCall) Do(opts ...googleapi.CallOption) (*AutoscalerList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -10681,26 +11350,24 @@ func (c *AutoscalersPatchCall) Context(ctx context.Context) *AutoscalersPatchCal } func (c *AutoscalersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.patch" call. @@ -10735,7 +11402,8 @@ func (c *AutoscalersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10831,26 +11499,24 @@ func (c *AutoscalersUpdateCall) Context(ctx context.Context) *AutoscalersUpdateC } func (c *AutoscalersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.autoscaler) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/autoscalers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.autoscalers.update" call. 
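The ifNoneMatch_ header handling consolidated in these hunks backs the generated IfNoneMatch option; a hedged usage sketch (all names are placeholders) for replaying a cached ETag:

package sketch

import (
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getAutoscalerIfChanged re-fetches an autoscaler only when it has changed
// since cachedETag; an HTTP 304 surfaces as a *googleapi.Error.
func getAutoscalerIfChanged(svc *compute.Service, project, zone, name, cachedETag string) (*compute.Autoscaler, error) {
	as, err := svc.Autoscalers.Get(project, zone, name).
		IfNoneMatch(cachedETag). // becomes the If-None-Match request header
		Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotModified {
		return nil, nil // unchanged: keep using the cached copy
	}
	return as, err
}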
@@ -10885,7 +11551,8 @@ func (c *AutoscalersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -10970,20 +11637,19 @@ func (c *BackendServicesDeleteCall) Context(ctx context.Context) *BackendService } func (c *BackendServicesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "backendService": c.backendService, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.delete" call. @@ -11018,7 +11684,8 @@ func (c *BackendServicesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11106,23 +11773,22 @@ func (c *BackendServicesGetCall) Context(ctx context.Context) *BackendServicesGe } func (c *BackendServicesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "backendService": c.backendService, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.get" call. 
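The context these delete/get hunks now pass to SendRequest is the one callers attach through the generated Context option; a brief sketch with placeholder names, using the x/net context type this vintage of the library expects:

package sketch

import (
	"golang.org/x/net/context"

	compute "google.golang.org/api/compute/v1"
)

// deleteBackendService issues the delete with a caller-supplied context so
// the request can be cancelled or given a deadline.
func deleteBackendService(ctx context.Context, svc *compute.Service, project, name string) error {
	_, err := svc.BackendServices.Delete(project, name).Context(ctx).Do()
	return err
}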
@@ -11157,7 +11823,8 @@ func (c *BackendServicesGetCall) Do(opts ...googleapi.CallOption) (*BackendServi HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11237,26 +11904,24 @@ func (c *BackendServicesGetHealthCall) Context(ctx context.Context) *BackendServ } func (c *BackendServicesGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "backendService": c.backendService, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.getHealth" call. @@ -11291,7 +11956,8 @@ func (c *BackendServicesGetHealthCall) Do(opts ...googleapi.CallOption) (*Backen HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11373,25 +12039,23 @@ func (c *BackendServicesInsertCall) Context(ctx context.Context) *BackendService } func (c *BackendServicesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.insert" call. @@ -11426,7 +12090,8 @@ func (c *BackendServicesInsertCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11498,12 +12163,11 @@ func (r *BackendServicesService) List(project string) *BackendServicesListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. 
For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -11560,22 +12224,21 @@ func (c *BackendServicesListCall) Context(ctx context.Context) *BackendServicesL } func (c *BackendServicesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.list" call. @@ -11610,7 +12273,8 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11623,7 +12287,7 @@ func (c *BackendServicesListCall) Do(opts ...googleapi.CallOption) (*BackendServ // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -11724,26 +12388,24 @@ func (c *BackendServicesPatchCall) Context(ctx context.Context) *BackendServices } func (c *BackendServicesPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "backendService": c.backendService, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.patch" call. 
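From the caller's side, the Patch/Update body handling shown above (JSONReader plus the Content-Type header now set on reqHeaders) reduces to passing a struct; a sketch with placeholder values, where TimeoutSec is only an illustrative field choice:

package sketch

import compute "google.golang.org/api/compute/v1"

// setBackendTimeout patches a single field on an existing backend service;
// the generated call marshals the struct as the JSON request body.
func setBackendTimeout(svc *compute.Service, project, name string, seconds int64) (*compute.Operation, error) {
	bs := &compute.BackendService{TimeoutSec: seconds}
	return svc.BackendServices.Patch(project, name, bs).Do()
}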
@@ -11778,7 +12440,8 @@ func (c *BackendServicesPatchCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11862,26 +12525,24 @@ func (c *BackendServicesUpdateCall) Context(ctx context.Context) *BackendService } func (c *BackendServicesUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "backendService": c.backendService, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.backendServices.update" call. @@ -11916,7 +12577,8 @@ func (c *BackendServicesUpdateCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -11995,12 +12657,11 @@ func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedLi // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -12057,22 +12718,21 @@ func (c *DiskTypesAggregatedListCall) Context(ctx context.Context) *DiskTypesAgg } func (c *DiskTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.diskTypes.aggregatedList" call. @@ -12107,7 +12767,8 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -12120,7 +12781,7 @@ func (c *DiskTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskTyp // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -12230,24 +12891,23 @@ func (c *DiskTypesGetCall) Context(ctx context.Context) *DiskTypesGetCall { } func (c *DiskTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "diskType": c.diskType, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.diskTypes.get" call. @@ -12282,7 +12942,8 @@ func (c *DiskTypesGetCall) Do(opts ...googleapi.CallOption) (*DiskType, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -12370,12 +13031,11 @@ func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -12432,23 +13092,22 @@ func (c *DiskTypesListCall) Context(ctx context.Context) *DiskTypesListCall { } func (c *DiskTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.diskTypes.list" call. @@ -12483,7 +13142,8 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -12497,7 +13157,7 @@ func (c *DiskTypesListCall) Do(opts ...googleapi.CallOption) (*DiskTypeList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -12600,12 +13260,11 @@ func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -12662,22 +13321,21 @@ func (c *DisksAggregatedListCall) Context(ctx context.Context) *DisksAggregatedL } func (c *DisksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.aggregatedList" call. @@ -12712,7 +13370,8 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -12725,7 +13384,7 @@ func (c *DisksAggregatedListCall) Do(opts ...googleapi.CallOption) (*DiskAggrega // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -12825,27 +13484,25 @@ func (c *DisksCreateSnapshotCall) Context(ctx context.Context) *DisksCreateSnaps } func (c *DisksCreateSnapshotCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "disk": c.disk, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.createSnapshot" call. @@ -12880,7 +13537,8 @@ func (c *DisksCreateSnapshotCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -12972,21 +13630,20 @@ func (c *DisksDeleteCall) Context(ctx context.Context) *DisksDeleteCall { } func (c *DisksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "disk": c.disk, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.delete" call. @@ -13021,7 +13678,8 @@ func (c *DisksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -13119,24 +13777,23 @@ func (c *DisksGetCall) Context(ctx context.Context) *DisksGetCall { } func (c *DisksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "disk": c.disk, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.get" call. @@ -13171,7 +13828,8 @@ func (c *DisksGetCall) Do(opts ...googleapi.CallOption) (*Disk, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -13233,7 +13891,7 @@ type DisksInsertCall struct { // Insert: Creates a persistent disk in the specified project using the // data in the request. 
You can create a disk with a sourceImage, a -// sourceSnapshot, or create an empty 200 GB data disk by omitting all +// sourceSnapshot, or create an empty 500 GB data disk by omitting all // properties. You can also create a disk that is larger than the // default size by specifying the sizeGb property. // For details, see https://cloud.google.com/compute/docs/reference/latest/disks/insert @@ -13269,26 +13927,24 @@ func (c *DisksInsertCall) Context(ctx context.Context) *DisksInsertCall { } func (c *DisksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.insert" call. @@ -13323,12 +13979,13 @@ func (c *DisksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { - // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 200 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", + // "description": "Creates a persistent disk in the specified project using the data in the request. You can create a disk with a sourceImage, a sourceSnapshot, or create an empty 500 GB data disk by omitting all properties. You can also create a disk that is larger than the default size by specifying the sizeGb property.", // "httpMethod": "POST", // "id": "compute.disks.insert", // "parameterOrder": [ @@ -13410,12 +14067,11 @@ func (r *DisksService) List(project string, zone string) *DisksListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -13472,23 +14128,22 @@ func (c *DisksListCall) Context(ctx context.Context) *DisksListCall { } func (c *DisksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.disks.list" call. @@ -13523,7 +14178,8 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -13537,7 +14193,7 @@ func (c *DisksListCall) Do(opts ...googleapi.CallOption) (*DiskList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -13604,6 +14260,150 @@ func (c *DisksListCall) Pages(ctx context.Context, f func(*DiskList) error) erro } } +// method id "compute.disks.resize": + +type DisksResizeCall struct { + s *Service + project string + zone string + disk string + disksresizerequest *DisksResizeRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Resize: Resizes the specified persistent disk. +func (r *DisksService) Resize(project string, zone string, disk string, disksresizerequest *DisksResizeRequest) *DisksResizeCall { + c := &DisksResizeCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.disk = disk + c.disksresizerequest = disksresizerequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *DisksResizeCall) Fields(s ...googleapi.Field) *DisksResizeCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *DisksResizeCall) Context(ctx context.Context) *DisksResizeCall { + c.ctx_ = ctx + return c +} + +func (c *DisksResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.disksresizerequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/resize") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "disk": c.disk, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.disks.resize" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *DisksResizeCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Resizes the specified persistent disk.", + // "httpMethod": "POST", + // "id": "compute.disks.resize", + // "parameterOrder": [ + // "project", + // "zone", + // "disk" + // ], + // "parameters": { + // "disk": { + // "description": "The name of the persistent disk.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/disks/{disk}/resize", + // "request": { + // "$ref": "DisksResizeRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.firewalls.delete": type FirewallsDeleteCall struct { @@ -13640,20 +14440,19 @@ func (c *FirewallsDeleteCall) Context(ctx context.Context) *FirewallsDeleteCall } func (c *FirewallsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "firewall": c.firewall, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.delete" call. 
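The hunks above add the generated DisksResizeCall for the new compute.disks.resize method. As a rough illustration of how a caller might exercise it once this client is vendored, here is a minimal sketch; it assumes the caller has already built a *compute.Service and that DisksResizeRequest exposes a SizeGb field as in the published Compute API (neither is shown in this hunk), and the import path is the one commonly used for this generated client.

package gceexample

// Assumed import path for the generated client being patched here; a
// vendored copy may live under a different path.
import compute "google.golang.org/api/compute/v1"

// resizeDisk grows the named persistent disk to sizeGb and returns the
// resulting Operation. Sketch only; see the assumptions noted above.
func resizeDisk(svc *compute.Service, project, zone, disk string, sizeGb int64) (*compute.Operation, error) {
	req := &compute.DisksResizeRequest{SizeGb: sizeGb}
	// Matches the generated signature added in this patch:
	// Resize(project, zone, disk string, *DisksResizeRequest) *DisksResizeCall.
	return svc.Disks.Resize(project, zone, disk, req).Do()
}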
@@ -13688,7 +14487,8 @@ func (c *FirewallsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -13775,23 +14575,22 @@ func (c *FirewallsGetCall) Context(ctx context.Context) *FirewallsGetCall { } func (c *FirewallsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "firewall": c.firewall, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.get" call. @@ -13826,7 +14625,8 @@ func (c *FirewallsGetCall) Do(opts ...googleapi.CallOption) (*Firewall, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -13904,25 +14704,23 @@ func (c *FirewallsInsertCall) Context(ctx context.Context) *FirewallsInsertCall } func (c *FirewallsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.insert" call. @@ -13957,7 +14755,8 @@ func (c *FirewallsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14029,12 +14828,11 @@ func (r *FirewallsService) List(project string) *FirewallsListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -14091,22 +14889,21 @@ func (c *FirewallsListCall) Context(ctx context.Context) *FirewallsListCall { } func (c *FirewallsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.list" call. @@ -14141,7 +14938,8 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14154,7 +14952,7 @@ func (c *FirewallsListCall) Do(opts ...googleapi.CallOption) (*FirewallList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -14253,26 +15051,24 @@ func (c *FirewallsPatchCall) Context(ctx context.Context) *FirewallsPatchCall { } func (c *FirewallsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "firewall": c.firewall, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.patch" call. 
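The filter parameter documented throughout these list calls accepts expressions of the form field_name comparison_string literal_string, for example name ne example-instance. As a hedged sketch of how that reads from the generated Go client, assuming FirewallsListCall carries the usual generated Filter(string) setter for the "filter" query parameter (the setter itself is not shown in these hunks):

package gceexample

import compute "google.golang.org/api/compute/v1" // assumed import path

// listNonExampleFirewalls lists firewalls whose name is not
// "example-firewall", using the filter grammar described in the
// doc comments above. Sketch only; the Filter setter is assumed.
func listNonExampleFirewalls(svc *compute.Service, project string) (*compute.FirewallList, error) {
	return svc.Firewalls.List(project).
		Filter("name ne example-firewall").
		Do()
}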
@@ -14307,7 +15103,8 @@ func (c *FirewallsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14389,26 +15186,24 @@ func (c *FirewallsUpdateCall) Context(ctx context.Context) *FirewallsUpdateCall } func (c *FirewallsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "firewall": c.firewall, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.firewalls.update" call. @@ -14443,7 +15238,8 @@ func (c *FirewallsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14522,12 +15318,11 @@ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRules // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -14584,22 +15379,21 @@ func (c *ForwardingRulesAggregatedListCall) Context(ctx context.Context) *Forwar } func (c *ForwardingRulesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.aggregatedList" call. @@ -14634,7 +15428,8 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14647,7 +15442,7 @@ func (c *ForwardingRulesAggregatedListCall) Do(opts ...googleapi.CallOption) (*F // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -14745,21 +15540,20 @@ func (c *ForwardingRulesDeleteCall) Context(ctx context.Context) *ForwardingRule } func (c *ForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "forwardingRule": c.forwardingRule, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.delete" call. @@ -14794,7 +15588,8 @@ func (c *ForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -14891,24 +15686,23 @@ func (c *ForwardingRulesGetCall) Context(ctx context.Context) *ForwardingRulesGe } func (c *ForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "forwardingRule": c.forwardingRule, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.get" call. 
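Every doRequest in this patch is rewritten the same way: the headers are collected into reqHeaders up front, assigned to req.Header, and the request is handed to gensupport.SendRequest instead of branching on c.ctx_ locally. Judging only from the code being deleted here, the helper centralizes the context dispatch each call previously duplicated; the following is a guess at that behaviour, not the actual gensupport implementation, which may add cancellation or retry plumbing.

package gensupportsketch

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// SendRequest reproduces the branch removed from each doRequest in
// these hunks: use ctxhttp.Do when a context was set on the call,
// plain client.Do otherwise. Sketch of an assumed helper only.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx == nil {
		return client.Do(req)
	}
	return ctxhttp.Do(ctx, client, req)
}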
@@ -14943,7 +15737,8 @@ func (c *ForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*ForwardingRu HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15031,26 +15826,24 @@ func (c *ForwardingRulesInsertCall) Context(ctx context.Context) *ForwardingRule } func (c *ForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.insert" call. @@ -15085,7 +15878,8 @@ func (c *ForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15167,12 +15961,11 @@ func (r *ForwardingRulesService) List(project string, region string) *Forwarding // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -15229,23 +16022,22 @@ func (c *ForwardingRulesListCall) Context(ctx context.Context) *ForwardingRulesL } func (c *ForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.list" call. @@ -15280,7 +16072,8 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15294,7 +16087,7 @@ func (c *ForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*ForwardingR // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -15402,27 +16195,25 @@ func (c *ForwardingRulesSetTargetCall) Context(ctx context.Context) *ForwardingR } func (c *ForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "forwardingRule": c.forwardingRule, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.forwardingRules.setTarget" call. @@ -15457,7 +16248,8 @@ func (c *ForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (*Operat HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15544,20 +16336,19 @@ func (c *GlobalAddressesDeleteCall) Context(ctx context.Context) *GlobalAddresse } func (c *GlobalAddressesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "address": c.address, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalAddresses.delete" call. 
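The Get-style calls keep their ifNoneMatch_ handling, now routed through reqHeaders as an If-None-Match header, and the Do doc comments in this file point callers at googleapi.IsNotModified for the 304 case. A hedged usage sketch follows; it assumes the generated IfNoneMatch setter that feeds the ifNoneMatch_ field seen above, which is not itself visible in these hunks.

package gceexample

import (
	compute "google.golang.org/api/compute/v1" // assumed import path
	"google.golang.org/api/googleapi"
)

// getAddressIfChanged refetches a global address only if it has changed
// since lastETag, as suggested by the Do doc comments. Sketch only.
func getAddressIfChanged(svc *compute.Service, project, name, lastETag string) (*compute.Address, error) {
	addr, err := svc.GlobalAddresses.Get(project, name).IfNoneMatch(lastETag).Do()
	if googleapi.IsNotModified(err) {
		// Server answered 304 Not Modified; the copy identified by
		// lastETag is still current.
		return nil, nil
	}
	return addr, err
}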
@@ -15592,7 +16383,8 @@ func (c *GlobalAddressesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15680,23 +16472,22 @@ func (c *GlobalAddressesGetCall) Context(ctx context.Context) *GlobalAddressesGe } func (c *GlobalAddressesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "address": c.address, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalAddresses.get" call. @@ -15731,7 +16522,8 @@ func (c *GlobalAddressesGetCall) Do(opts ...googleapi.CallOption) (*Address, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15809,25 +16601,23 @@ func (c *GlobalAddressesInsertCall) Context(ctx context.Context) *GlobalAddresse } func (c *GlobalAddressesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.address) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalAddresses.insert" call. @@ -15862,7 +16652,8 @@ func (c *GlobalAddressesInsertCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -15933,12 +16724,11 @@ func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -15995,22 +16785,21 @@ func (c *GlobalAddressesListCall) Context(ctx context.Context) *GlobalAddressesL } func (c *GlobalAddressesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalAddresses.list" call. @@ -16045,7 +16834,8 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16058,7 +16848,7 @@ func (c *GlobalAddressesListCall) Do(opts ...googleapi.CallOption) (*AddressList // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -16154,20 +16944,19 @@ func (c *GlobalForwardingRulesDeleteCall) Context(ctx context.Context) *GlobalFo } func (c *GlobalForwardingRulesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "forwardingRule": c.forwardingRule, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalForwardingRules.delete" call. @@ -16202,7 +16991,8 @@ func (c *GlobalForwardingRulesDeleteCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16290,23 +17080,22 @@ func (c *GlobalForwardingRulesGetCall) Context(ctx context.Context) *GlobalForwa } func (c *GlobalForwardingRulesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "forwardingRule": c.forwardingRule, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalForwardingRules.get" call. @@ -16341,7 +17130,8 @@ func (c *GlobalForwardingRulesGetCall) Do(opts ...googleapi.CallOption) (*Forwar HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16419,25 +17209,23 @@ func (c *GlobalForwardingRulesInsertCall) Context(ctx context.Context) *GlobalFo } func (c *GlobalForwardingRulesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalForwardingRules.insert" call. @@ -16472,7 +17260,8 @@ func (c *GlobalForwardingRulesInsertCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16544,12 +17333,11 @@ func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRul // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -16606,22 +17394,21 @@ func (c *GlobalForwardingRulesListCall) Context(ctx context.Context) *GlobalForw } func (c *GlobalForwardingRulesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalForwardingRules.list" call. @@ -16656,7 +17443,8 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16669,7 +17457,7 @@ func (c *GlobalForwardingRulesListCall) Do(opts ...googleapi.CallOption) (*Forwa // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -16768,26 +17556,24 @@ func (c *GlobalForwardingRulesSetTargetCall) Context(ctx context.Context) *Globa } func (c *GlobalForwardingRulesSetTargetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "forwardingRule": c.forwardingRule, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalForwardingRules.setTarget" call. @@ -16822,7 +17608,8 @@ func (c *GlobalForwardingRulesSetTargetCall) Do(opts ...googleapi.CallOption) (* HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -16901,12 +17688,11 @@ func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperatio // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -16963,22 +17749,21 @@ func (c *GlobalOperationsAggregatedListCall) Context(ctx context.Context) *Globa } func (c *GlobalOperationsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalOperations.aggregatedList" call. @@ -17013,7 +17798,8 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17026,7 +17812,7 @@ func (c *GlobalOperationsAggregatedListCall) Do(opts ...googleapi.CallOption) (* // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -17122,20 +17908,19 @@ func (c *GlobalOperationsDeleteCall) Context(ctx context.Context) *GlobalOperati } func (c *GlobalOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalOperations.delete" call. @@ -17231,23 +18016,22 @@ func (c *GlobalOperationsGetCall) Context(ctx context.Context) *GlobalOperations } func (c *GlobalOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalOperations.get" call. @@ -17282,7 +18066,8 @@ func (c *GlobalOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17360,12 +18145,11 @@ func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall // example-instance, you would use filter=name ne // example-instance. 
// -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -17422,22 +18206,21 @@ func (c *GlobalOperationsListCall) Context(ctx context.Context) *GlobalOperation } func (c *GlobalOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.globalOperations.list" call. @@ -17472,7 +18255,8 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17485,7 +18269,7 @@ func (c *GlobalOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -17581,20 +18365,19 @@ func (c *HttpHealthChecksDeleteCall) Context(ctx context.Context) *HttpHealthChe } func (c *HttpHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpHealthCheck": c.httpHealthCheck, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.delete" call. 
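
The hunks above repeatedly delete the per-call `if c.ctx_ != nil { ctxhttp.Do } else client.Do` branch and replace it with a single gensupport.SendRequest call. The following is only a hedged sketch of the behaviour those call sites rely on, inferred from the removed code; the real helper in google.golang.org/api/gensupport may perform additional checks before sending.

// Sketch of a SendRequest-style helper, assuming it only needs to choose
// between a context-aware dispatch and a plain client.Do.
package gensupport

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// SendRequest dispatches req, honouring an optional context.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx != nil {
		// ctxhttp.Do cancels the in-flight request if ctx is canceled,
		// which is what the removed per-call branch used to do inline.
		return ctxhttp.Do(ctx, client, req)
	}
	return client.Do(req)
}
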
@@ -17629,7 +18412,8 @@ func (c *HttpHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17717,23 +18501,22 @@ func (c *HttpHealthChecksGetCall) Context(ctx context.Context) *HttpHealthChecks } func (c *HttpHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpHealthCheck": c.httpHealthCheck, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.get" call. @@ -17768,7 +18551,8 @@ func (c *HttpHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpHealthC HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17846,25 +18630,23 @@ func (c *HttpHealthChecksInsertCall) Context(ctx context.Context) *HttpHealthChe } func (c *HttpHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.insert" call. @@ -17899,7 +18681,8 @@ func (c *HttpHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -17971,12 +18754,11 @@ func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -18033,22 +18815,21 @@ func (c *HttpHealthChecksListCall) Context(ctx context.Context) *HttpHealthCheck } func (c *HttpHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.list" call. @@ -18083,7 +18864,8 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18096,7 +18878,7 @@ func (c *HttpHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpHealth // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -18196,26 +18978,24 @@ func (c *HttpHealthChecksPatchCall) Context(ctx context.Context) *HttpHealthChec } func (c *HttpHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpHealthCheck": c.httpHealthCheck, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.patch" call. 
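
The long filter descriptions in these hunks are easier to read as a caller-side example. A hedged sketch follows; the package name, the authenticated http.Client, and the project ID "my-project" are placeholders, not part of this change.

package computeexample

import (
	"fmt"
	"log"
	"net/http"

	"google.golang.org/api/compute/v1"
)

// listFiltered lists HTTP health checks whose name is not "example-check",
// using the filter grammar described above:
// field_name comparison_string literal_string (eq / ne, RE2 for strings,
// and the regex must match the entire field).
func listFiltered(client *http.Client) {
	svc, err := compute.New(client) // client must already carry credentials
	if err != nil {
		log.Fatal(err)
	}
	list, err := svc.HttpHealthChecks.List("my-project").
		Filter("name ne example-check").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, hc := range list.Items {
		fmt.Println(hc.Name, hc.Port)
	}
}
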
@@ -18250,7 +19030,8 @@ func (c *HttpHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18332,26 +19113,24 @@ func (c *HttpHealthChecksUpdateCall) Context(ctx context.Context) *HttpHealthChe } func (c *HttpHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpHealthCheck": c.httpHealthCheck, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpHealthChecks.update" call. @@ -18386,7 +19165,8 @@ func (c *HttpHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18464,20 +19244,19 @@ func (c *HttpsHealthChecksDeleteCall) Context(ctx context.Context) *HttpsHealthC } func (c *HttpsHealthChecksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpsHealthCheck": c.httpsHealthCheck, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.delete" call. 
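
The Get hunks above move If-None-Match handling into the shared reqHeaders block, but the caller-facing contract stays the same: set an ETag on the call and treat StatusNotModified as "no change". A hedged sketch of that pattern; project, name, and the stored ETag are placeholders.

package computeexample

import (
	"log"

	"google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// refreshHealthCheck re-fetches an HTTP health check only if it has changed
// since lastETag was recorded. Error handling is deliberately minimal.
func refreshHealthCheck(svc *compute.Service, project, name, lastETag string) (*compute.HttpHealthCheck, error) {
	call := svc.HttpHealthChecks.Get(project, name)
	if lastETag != "" {
		call.IfNoneMatch(lastETag)
	}
	hc, err := call.Do()
	if googleapi.IsNotModified(err) {
		// 304 Not Modified: the cached copy is still current.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	log.Printf("fetched %s, new ETag %q", hc.Name, hc.ServerResponse.Header.Get("Etag"))
	return hc, nil
}
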
@@ -18512,7 +19291,8 @@ func (c *HttpsHealthChecksDeleteCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18599,23 +19379,22 @@ func (c *HttpsHealthChecksGetCall) Context(ctx context.Context) *HttpsHealthChec } func (c *HttpsHealthChecksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpsHealthCheck": c.httpsHealthCheck, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.get" call. @@ -18650,7 +19429,8 @@ func (c *HttpsHealthChecksGetCall) Do(opts ...googleapi.CallOption) (*HttpsHealt HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18727,25 +19507,23 @@ func (c *HttpsHealthChecksInsertCall) Context(ctx context.Context) *HttpsHealthC } func (c *HttpsHealthChecksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.insert" call. @@ -18780,7 +19558,8 @@ func (c *HttpsHealthChecksInsertCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18851,12 +19630,11 @@ func (r *HttpsHealthChecksService) List(project string) *HttpsHealthChecksListCa // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -18913,22 +19691,21 @@ func (c *HttpsHealthChecksListCall) Context(ctx context.Context) *HttpsHealthChe } func (c *HttpsHealthChecksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.list" call. @@ -18963,7 +19740,8 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -18976,7 +19754,7 @@ func (c *HttpsHealthChecksListCall) Do(opts ...googleapi.CallOption) (*HttpsHeal // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -19075,26 +19853,24 @@ func (c *HttpsHealthChecksPatchCall) Context(ctx context.Context) *HttpsHealthCh } func (c *HttpsHealthChecksPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpsHealthCheck": c.httpsHealthCheck, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.patch" call. 
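
The insert/patch/update hunks keep building the request body with googleapi.WithoutDataWrapper.JSONReader and now set Content-Type through the shared header map. Conceptually (a hedged sketch only, not the library's actual implementation), a "without data wrapper" JSON reader is presumably plain JSON encoding into a buffer, as opposed to wrapping the payload in a {"data": ...} envelope.

package computeexample

import (
	"bytes"
	"encoding/json"
	"io"
)

// jsonReader mimics what the generated code relies on: marshal the request
// struct to JSON and expose it as an io.Reader for http.NewRequest.
func jsonReader(v interface{}) (io.Reader, error) {
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(v); err != nil {
		return nil, err
	}
	return buf, nil
}
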
@@ -19129,7 +19905,8 @@ func (c *HttpsHealthChecksPatchCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19210,26 +19987,24 @@ func (c *HttpsHealthChecksUpdateCall) Context(ctx context.Context) *HttpsHealthC } func (c *HttpsHealthChecksUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.httpshealthcheck) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpsHealthChecks/{httpsHealthCheck}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "httpsHealthCheck": c.httpsHealthCheck, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.httpsHealthChecks.update" call. @@ -19264,7 +20039,8 @@ func (c *HttpsHealthChecksUpdateCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19343,20 +20119,19 @@ func (c *ImagesDeleteCall) Context(ctx context.Context) *ImagesDeleteCall { } func (c *ImagesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "image": c.image, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.images.delete" call. 
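
Most mutating calls touched in these hunks (delete, insert, patch, update) return a compute.Operation rather than the final resource, and callers poll compute.globalOperations.get until it reports DONE. A hedged sketch of that usual pattern; the retry count and sleep interval are placeholders, and production code would use back-off.

package computeexample

import (
	"fmt"
	"time"

	"google.golang.org/api/compute/v1"
)

// waitForGlobalOp polls a global operation until it is DONE or the retry
// budget is exhausted, surfacing any error the operation recorded.
func waitForGlobalOp(svc *compute.Service, project string, op *compute.Operation) error {
	for i := 0; i < 60; i++ {
		cur, err := svc.GlobalOperations.Get(project, op.Name).Do()
		if err != nil {
			return err
		}
		if cur.Status == "DONE" {
			if cur.Error != nil && len(cur.Error.Errors) > 0 {
				return fmt.Errorf("operation %s failed: %s", cur.Name, cur.Error.Errors[0].Message)
			}
			return nil
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("operation %s did not finish in time", op.Name)
}
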
@@ -19391,7 +20166,8 @@ func (c *ImagesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19472,26 +20248,24 @@ func (c *ImagesDeprecateCall) Context(ctx context.Context) *ImagesDeprecateCall } func (c *ImagesDeprecateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "image": c.image, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.images.deprecate" call. @@ -19526,7 +20300,8 @@ func (c *ImagesDeprecateCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19617,23 +20392,22 @@ func (c *ImagesGetCall) Context(ctx context.Context) *ImagesGetCall { } func (c *ImagesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "image": c.image, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.images.get" call. @@ -19668,7 +20442,8 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19709,6 +20484,145 @@ func (c *ImagesGetCall) Do(opts ...googleapi.CallOption) (*Image, error) { } +// method id "compute.images.getFromFamily": + +type ImagesGetFromFamilyCall struct { + s *Service + project string + family string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetFromFamily: Returns the latest image that is part of an image +// family and is not deprecated. 
+func (r *ImagesService) GetFromFamily(project string, family string) *ImagesGetFromFamilyCall { + c := &ImagesGetFromFamilyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.family = family + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *ImagesGetFromFamilyCall) Fields(s ...googleapi.Field) *ImagesGetFromFamilyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of If-None-Match. +func (c *ImagesGetFromFamilyCall) IfNoneMatch(entityTag string) *ImagesGetFromFamilyCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *ImagesGetFromFamilyCall) Context(ctx context.Context) *ImagesGetFromFamilyCall { + c.ctx_ = ctx + return c +} + +func (c *ImagesGetFromFamilyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/family/{family}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "family": c.family, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.images.getFromFamily" call. +// Exactly one of *Image or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Image.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *ImagesGetFromFamilyCall) Do(opts ...googleapi.CallOption) (*Image, error) { + gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Image{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the latest image that is part of an image family and is not deprecated.", + // "httpMethod": "GET", + // "id": "compute.images.getFromFamily", + // "parameterOrder": [ + // "project", + // "family" + // ], + // "parameters": { + // "family": { + // "description": "Name of the image resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/images/family/{family}", + // "response": { + // "$ref": "Image" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + // method id "compute.images.insert": type ImagesInsertCall struct { @@ -19746,25 +20660,23 @@ func (c *ImagesInsertCall) Context(ctx context.Context) *ImagesInsertCall { } func (c *ImagesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.image) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.images.insert" call. @@ -19799,7 +20711,8 @@ func (c *ImagesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -19850,12 +20763,10 @@ type ImagesListCall struct { // List: Retrieves the list of private images available to the specified // project. Private images are images you create that belong to your // project. This method does not get any images that belong to other -// projects, including publicly-available images, like Debian 7. If you +// projects, including publicly-available images, like Debian 8. 
If you // want to get a list of publicly-available images, use this method to // make a request to the respective image project, such as debian-cloud // or windows-cloud. -// -// See Accessing images for more information. // For details, see https://cloud.google.com/compute/docs/reference/latest/images/list func (r *ImagesService) List(project string) *ImagesListCall { c := &ImagesListCall{s: r.s, urlParams_: make(gensupport.URLParams)} @@ -19881,12 +20792,11 @@ func (r *ImagesService) List(project string) *ImagesListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -19943,22 +20853,21 @@ func (c *ImagesListCall) Context(ctx context.Context) *ImagesListCall { } func (c *ImagesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.images.list" call. @@ -19993,12 +20902,13 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { - // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 7. If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.\n\nSee Accessing images for more information.", + // "description": "Retrieves the list of private images available to the specified project. Private images are images you create that belong to your project. This method does not get any images that belong to other projects, including publicly-available images, like Debian 8. 
If you want to get a list of publicly-available images, use this method to make a request to the respective image project, such as debian-cloud or windows-cloud.", // "httpMethod": "GET", // "id": "compute.images.list", // "parameterOrder": [ @@ -20006,7 +20916,7 @@ func (c *ImagesListCall) Do(opts ...googleapi.CallOption) (*ImageList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -20113,27 +21023,25 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Context(ctx context.Context) } func (c *InstanceGroupManagersAbandonInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersabandoninstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/abandonInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.abandonInstances" call. @@ -20168,7 +21076,8 @@ func (c *InstanceGroupManagersAbandonInstancesCall) Do(opts ...googleapi.CallOpt HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -20253,12 +21162,11 @@ func (r *InstanceGroupManagersService) AggregatedList(project string) *InstanceG // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -20315,22 +21223,21 @@ func (c *InstanceGroupManagersAggregatedListCall) Context(ctx context.Context) * } func (c *InstanceGroupManagersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroupManagers") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.aggregatedList" call. @@ -20366,7 +21273,8 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -20379,7 +21287,7 @@ func (c *InstanceGroupManagersAggregatedListCall) Do(opts ...googleapi.CallOptio // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -20479,21 +21387,20 @@ func (c *InstanceGroupManagersDeleteCall) Context(ctx context.Context) *Instance } func (c *InstanceGroupManagersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.delete" call. @@ -20528,7 +21435,8 @@ func (c *InstanceGroupManagersDeleteCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -20620,27 +21528,25 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Context(ctx context.Context) } func (c *InstanceGroupManagersDeleteInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersdeleteinstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/deleteInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.deleteInstances" call. 
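The hunks above, and the dozens that follow, apply one mechanical refactor to every generated doRequest: collect the headers into a reqHeaders value up front, attach them with req.Header = reqHeaders, and replace the trailing branch on c.ctx_ with a single call to gensupport.SendRequest. As orientation only (the real gensupport implementation is not part of this patch and may do more), a minimal sketch of what such a helper could look like, assuming it simply folds the removed branch into one place:

package gensupport

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// SendRequest issues req on client. When the generated call has had a context
// attached via Context(), ctx is non-nil and the request goes through ctxhttp
// so cancellation works; with a nil context it behaves exactly like the plain
// client.Do path the old per-method code used.
func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx == nil {
		return client.Do(req)
	}
	return ctxhttp.Do(ctx, client, req)
}

Centralizing the dispatch this way is what lets each later hunk delete its private ctxhttp plumbing without changing behavior.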
@@ -20675,7 +21581,8 @@ func (c *InstanceGroupManagersDeleteInstancesCall) Do(opts ...googleapi.CallOpti HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -20774,24 +21681,23 @@ func (c *InstanceGroupManagersGetCall) Context(ctx context.Context) *InstanceGro } func (c *InstanceGroupManagersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.get" call. @@ -20826,7 +21732,8 @@ func (c *InstanceGroupManagersGetCall) Do(opts ...googleapi.CallOption) (*Instan HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -20916,26 +21823,24 @@ func (c *InstanceGroupManagersInsertCall) Context(ctx context.Context) *Instance } func (c *InstanceGroupManagersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanager) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.insert" call. @@ -20970,7 +21875,8 @@ func (c *InstanceGroupManagersInsertCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21050,12 +21956,11 @@ func (r *InstanceGroupManagersService) List(project string, zone string) *Instan // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. 
For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -21112,23 +22017,22 @@ func (c *InstanceGroupManagersListCall) Context(ctx context.Context) *InstanceGr } func (c *InstanceGroupManagersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.list" call. @@ -21163,7 +22067,8 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21177,7 +22082,7 @@ func (c *InstanceGroupManagersListCall) Do(opts ...googleapi.CallOption) (*Insta // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -21285,21 +22190,20 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Context(ctx context.Cont } func (c *InstanceGroupManagersListManagedInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/listManagedInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.listManagedInstances" call. 
@@ -21336,7 +22240,8 @@ func (c *InstanceGroupManagersListManagedInstancesCall) Do(opts ...googleapi.Cal HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21428,27 +22333,25 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Context(ctx context.Context } func (c *InstanceGroupManagersRecreateInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagersrecreateinstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/recreateInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.recreateInstances" call. @@ -21483,7 +22386,8 @@ func (c *InstanceGroupManagersRecreateInstancesCall) Do(opts ...googleapi.CallOp HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21576,21 +22480,20 @@ func (c *InstanceGroupManagersResizeCall) Context(ctx context.Context) *Instance } func (c *InstanceGroupManagersResizeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/resize") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.resize" call. 
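None of these hunks change the generated calling convention: a caller still builds a call value from the service, optionally attaches a context, and invokes Do. A hedged usage sketch against the surface visible in this diff; compute.New, the Items slice, and the Name field are assumed from the package's usual generated shape rather than shown in these hunks:

package main

import (
	"fmt"
	"log"
	"net/http"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	// A real caller would pass an OAuth2-authenticated *http.Client here;
	// http.DefaultClient only keeps the sketch short.
	svc, err := compute.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}

	ctx := context.Background()

	// Context() stores ctx on the call; doRequest now forwards it to
	// gensupport.SendRequest instead of branching on c.ctx_ in every method.
	images, err := svc.Images.List("my-project").Context(ctx).Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, img := range images.Items {
		fmt.Println(img.Name)
	}
}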
@@ -21625,7 +22528,8 @@ func (c *InstanceGroupManagersResizeCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21720,27 +22624,25 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Context(ctx context.Conte } func (c *InstanceGroupManagersSetInstanceTemplateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssetinstancetemplaterequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setInstanceTemplate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.setInstanceTemplate" call. @@ -21775,7 +22677,8 @@ func (c *InstanceGroupManagersSetInstanceTemplateCall) Do(opts ...googleapi.Call HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -21869,27 +22772,25 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Context(ctx context.Context) * } func (c *InstanceGroupManagersSetTargetPoolsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupmanagerssettargetpoolsrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroupManagers/{instanceGroupManager}/setTargetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroupManager": c.instanceGroupManager, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroupManagers.setTargetPools" call. 
@@ -21924,7 +22825,8 @@ func (c *InstanceGroupManagersSetTargetPoolsCall) Do(opts ...googleapi.CallOptio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22014,27 +22916,25 @@ func (c *InstanceGroupsAddInstancesCall) Context(ctx context.Context) *InstanceG } func (c *InstanceGroupsAddInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsaddinstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/addInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.addInstances" call. @@ -22069,7 +22969,8 @@ func (c *InstanceGroupsAddInstancesCall) Do(opts ...googleapi.CallOption) (*Oper HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22154,12 +23055,11 @@ func (r *InstanceGroupsService) AggregatedList(project string) *InstanceGroupsAg // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -22216,22 +23116,21 @@ func (c *InstanceGroupsAggregatedListCall) Context(ctx context.Context) *Instanc } func (c *InstanceGroupsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.aggregatedList" call. @@ -22266,7 +23165,8 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22279,7 +23179,7 @@ func (c *InstanceGroupsAggregatedListCall) Do(opts ...googleapi.CallOption) (*In // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -22379,21 +23279,20 @@ func (c *InstanceGroupsDeleteCall) Context(ctx context.Context) *InstanceGroupsD } func (c *InstanceGroupsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.delete" call. @@ -22428,7 +23327,8 @@ func (c *InstanceGroupsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22523,24 +23423,23 @@ func (c *InstanceGroupsGetCall) Context(ctx context.Context) *InstanceGroupsGetC } func (c *InstanceGroupsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.get" call. 
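The filter parameter documented repeatedly in these descriptions follows the grammar field_name comparison_string literal_string, with eq/ne comparisons and RE2 literals that must match the entire field. A hedged sketch of passing such an expression from Go; the Filter setter is assumed from the generated list calls' usual surface and is not itself shown in these hunks:

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // stand-in for an authenticated client
	if err != nil {
		log.Fatal(err)
	}

	// "name ne example-instance" follows the grammar quoted above:
	// field_name, then eq/ne, then a literal that must match the whole field.
	// Filter is assumed from the generated list calls' usual surface.
	result, err := svc.InstanceGroups.AggregatedList("my-project").
		Filter("name ne example-instance").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("aggregated list fetched, HTTP status:", result.HTTPStatusCode)
}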
@@ -22575,7 +23474,8 @@ func (c *InstanceGroupsGetCall) Do(opts ...googleapi.CallOption) (*InstanceGroup HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22660,26 +23560,24 @@ func (c *InstanceGroupsInsertCall) Context(ctx context.Context) *InstanceGroupsI } func (c *InstanceGroupsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroup) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.insert" call. @@ -22714,7 +23612,8 @@ func (c *InstanceGroupsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22794,12 +23693,11 @@ func (r *InstanceGroupsService) List(project string, zone string) *InstanceGroup // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -22856,23 +23754,22 @@ func (c *InstanceGroupsListCall) Context(ctx context.Context) *InstanceGroupsLis } func (c *InstanceGroupsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.list" call. @@ -22907,7 +23804,8 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -22921,7 +23819,7 @@ func (c *InstanceGroupsListCall) Do(opts ...googleapi.CallOption) (*InstanceGrou // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -23027,12 +23925,11 @@ func (r *InstanceGroupsService) ListInstances(project string, zone string, insta // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -23079,27 +23976,25 @@ func (c *InstanceGroupsListInstancesCall) Context(ctx context.Context) *Instance } func (c *InstanceGroupsListInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupslistinstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/listInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.listInstances" call. @@ -23134,7 +24029,8 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23149,7 +24045,7 @@ func (c *InstanceGroupsListInstancesCall) Do(opts ...googleapi.CallOption) (*Ins // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -23243,27 +24139,25 @@ func (c *InstanceGroupsRemoveInstancesCall) Context(ctx context.Context) *Instan } func (c *InstanceGroupsRemoveInstancesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupsremoveinstancesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/removeInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.removeInstances" call. @@ -23298,7 +24192,8 @@ func (c *InstanceGroupsRemoveInstancesCall) Do(opts ...googleapi.CallOption) (*O HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23386,27 +24281,25 @@ func (c *InstanceGroupsSetNamedPortsCall) Context(ctx context.Context) *Instance } func (c *InstanceGroupsSetNamedPortsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancegroupssetnamedportsrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instanceGroups/{instanceGroup}/setNamedPorts") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instanceGroup": c.instanceGroup, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceGroups.setNamedPorts" call. @@ -23441,7 +24334,8 @@ func (c *InstanceGroupsSetNamedPortsCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23530,20 +24424,19 @@ func (c *InstanceTemplatesDeleteCall) Context(ctx context.Context) *InstanceTemp } func (c *InstanceTemplatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "instanceTemplate": c.instanceTemplate, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceTemplates.delete" call. 
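The GET hunks around this point also carry the ifNoneMatch_ plumbing: when a caller supplies an ETag, the generated code sends If-None-Match and, as in the images.getFromFamily body at the start of this excerpt, surfaces a 304 as a *googleapi.Error carrying the status code. A hedged caller-side sketch; the IfNoneMatch setter, the Get signature, and the presence of the same StatusNotModified branch on this call are assumed from the generated package's usual surface:

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

func main() {
	svc, err := compute.New(http.DefaultClient) // stand-in for an authenticated client
	if err != nil {
		log.Fatal(err)
	}

	// previousETag is hypothetical: an ETag remembered from an earlier response.
	const previousETag = `"abc123"`

	// IfNoneMatch is assumed from the generated GET calls' usual surface; the
	// 304 check mirrors the StatusNotModified handling shown for
	// images.getFromFamily near the start of this excerpt.
	tmpl, err := svc.InstanceTemplates.Get("my-project", "my-template").
		IfNoneMatch(previousETag).
		Do()
	if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == http.StatusNotModified {
		fmt.Println("instance template unchanged since last fetch")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fetched template, HTTP status:", tmpl.HTTPStatusCode)
}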
@@ -23578,7 +24471,8 @@ func (c *InstanceTemplatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23666,23 +24560,22 @@ func (c *InstanceTemplatesGetCall) Context(ctx context.Context) *InstanceTemplat } func (c *InstanceTemplatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "instanceTemplate": c.instanceTemplate, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceTemplates.get" call. @@ -23717,7 +24610,8 @@ func (c *InstanceTemplatesGetCall) Do(opts ...googleapi.CallOption) (*InstanceTe HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23798,25 +24692,23 @@ func (c *InstanceTemplatesInsertCall) Context(ctx context.Context) *InstanceTemp } func (c *InstanceTemplatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceTemplates.insert" call. @@ -23851,7 +24743,8 @@ func (c *InstanceTemplatesInsertCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -23923,12 +24816,11 @@ func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCa // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -23985,22 +24877,21 @@ func (c *InstanceTemplatesListCall) Context(ctx context.Context) *InstanceTempla } func (c *InstanceTemplatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instanceTemplates.list" call. @@ -24035,7 +24926,8 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24048,7 +24940,7 @@ func (c *InstanceTemplatesListCall) Do(opts ...googleapi.CallOption) (*InstanceT // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -24150,27 +25042,25 @@ func (c *InstancesAddAccessConfigCall) Context(ctx context.Context) *InstancesAd } func (c *InstancesAddAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.addAccessConfig" call. @@ -24205,7 +25095,8 @@ func (c *InstancesAddAccessConfigCall) Do(opts ...googleapi.CallOption) (*Operat HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24299,12 +25190,11 @@ func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedLi // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -24361,22 +25251,21 @@ func (c *InstancesAggregatedListCall) Context(ctx context.Context) *InstancesAgg } func (c *InstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.aggregatedList" call. @@ -24411,7 +25300,8 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24424,7 +25314,7 @@ func (c *InstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Instanc // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -24524,27 +25414,25 @@ func (c *InstancesAttachDiskCall) Context(ctx context.Context) *InstancesAttachD } func (c *InstancesAttachDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.attachDisk" call. @@ -24579,7 +25467,8 @@ func (c *InstancesAttachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24669,21 +25558,20 @@ func (c *InstancesDeleteCall) Context(ctx context.Context) *InstancesDeleteCall } func (c *InstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.delete" call. @@ -24718,7 +25606,8 @@ func (c *InstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24807,21 +25696,20 @@ func (c *InstancesDeleteAccessConfigCall) Context(ctx context.Context) *Instance } func (c *InstancesDeleteAccessConfigCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.deleteAccessConfig" call. @@ -24856,7 +25744,8 @@ func (c *InstancesDeleteAccessConfigCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -24957,21 +25846,20 @@ func (c *InstancesDetachDiskCall) Context(ctx context.Context) *InstancesDetachD } func (c *InstancesDetachDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.detachDisk" call. 
@@ -25006,7 +25894,8 @@ func (c *InstancesDetachDiskCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25112,24 +26001,23 @@ func (c *InstancesGetCall) Context(ctx context.Context) *InstancesGetCall { } func (c *InstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.get" call. @@ -25164,7 +26052,8 @@ func (c *InstancesGetCall) Do(opts ...googleapi.CallOption) (*Instance, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25270,24 +26159,23 @@ func (c *InstancesGetSerialPortOutputCall) Context(ctx context.Context) *Instanc } func (c *InstancesGetSerialPortOutputCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.getSerialPortOutput" call. 
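The GET-style calls in these hunks (InstancesGetCall, InstancesGetSerialPortOutputCall, and the various list calls) also fold the conditional If-None-Match write into reqHeaders, so caller-visible behavior is unchanged: the ETag flow described in the Do comments still applies. A hedged caller-side sketch, assuming the generated call type keeps its usual IfNoneMatch setter and using placeholder project, zone, and instance names:

// Hypothetical usage sketch of the If-None-Match / IsNotModified flow.
package example

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// refreshInstance re-fetches an instance only if it has changed since the
// ETag cached from an earlier Get; svc is an initialized *compute.Service.
func refreshInstance(svc *compute.Service, cachedETag string) (*compute.Instance, error) {
	inst, err := svc.Instances.Get("my-project", "us-central1-f", "my-instance").
		IfNoneMatch(cachedETag).
		Do()
	if googleapi.IsNotModified(err) {
		// 304 Not Modified: the cached copy is still current.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return inst, nil
}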
@@ -25322,7 +26210,8 @@ func (c *InstancesGetSerialPortOutputCall) Do(opts ...googleapi.CallOption) (*Se HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25419,26 +26308,24 @@ func (c *InstancesInsertCall) Context(ctx context.Context) *InstancesInsertCall } func (c *InstancesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.insert" call. @@ -25473,7 +26360,8 @@ func (c *InstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25555,12 +26443,11 @@ func (r *InstancesService) List(project string, zone string) *InstancesListCall // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -25617,23 +26504,22 @@ func (c *InstancesListCall) Context(ctx context.Context) *InstancesListCall { } func (c *InstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.list" call. @@ -25668,7 +26554,8 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25682,7 +26569,7 @@ func (c *InstancesListCall) Do(opts ...googleapi.CallOption) (*InstanceList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -25787,21 +26674,20 @@ func (c *InstancesResetCall) Context(ctx context.Context) *InstancesResetCall { } func (c *InstancesResetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.reset" call. @@ -25836,7 +26722,8 @@ func (c *InstancesResetCall) Do(opts ...googleapi.CallOption) (*Operation, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -25925,21 +26812,20 @@ func (c *InstancesSetDiskAutoDeleteCall) Context(ctx context.Context) *Instances } func (c *InstancesSetDiskAutoDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.setDiskAutoDelete" call. 
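The rewritten filter documentation above keeps the same expression grammar (field_name comparison_string literal_string, with eq/ne comparisons and RE2 string literals). As a small hedged illustration of that grammar against the instances.list call, using placeholder project and zone values and assuming the generated list call keeps its usual Filter setter:

// Hypothetical sketch: list instances in a zone whose name is not
// "example-instance", mirroring the filter example in the description above.
package example

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

func listNonExampleInstances(svc *compute.Service) error {
	list, err := svc.Instances.List("my-project", "us-central1-f").
		Filter("name ne example-instance").
		Do()
	if err != nil {
		return err
	}
	for _, inst := range list.Items {
		fmt.Printf("%s (%s)\n", inst.Name, inst.Status)
	}
	return nil
}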
@@ -25974,7 +26860,8 @@ func (c *InstancesSetDiskAutoDeleteCall) Do(opts ...googleapi.CallOption) (*Oper HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26077,27 +26964,25 @@ func (c *InstancesSetMachineTypeCall) Context(ctx context.Context) *InstancesSet } func (c *InstancesSetMachineTypeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancessetmachinetyperequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMachineType") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.setMachineType" call. @@ -26132,7 +27017,8 @@ func (c *InstancesSetMachineTypeCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26224,27 +27110,25 @@ func (c *InstancesSetMetadataCall) Context(ctx context.Context) *InstancesSetMet } func (c *InstancesSetMetadataCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.setMetadata" call. 
@@ -26279,7 +27163,8 @@ func (c *InstancesSetMetadataCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26370,27 +27255,25 @@ func (c *InstancesSetSchedulingCall) Context(ctx context.Context) *InstancesSetS } func (c *InstancesSetSchedulingCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.setScheduling" call. @@ -26425,7 +27308,8 @@ func (c *InstancesSetSchedulingCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26517,27 +27401,25 @@ func (c *InstancesSetTagsCall) Context(ctx context.Context) *InstancesSetTagsCal } func (c *InstancesSetTagsCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.setTags" call. 
@@ -26572,7 +27454,8 @@ func (c *InstancesSetTagsCall) Do(opts ...googleapi.CallOption) (*Operation, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26663,21 +27546,20 @@ func (c *InstancesStartCall) Context(ctx context.Context) *InstancesStartCall { } func (c *InstancesStartCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/start") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.start" call. @@ -26712,7 +27594,8 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26760,6 +27643,152 @@ func (c *InstancesStartCall) Do(opts ...googleapi.CallOption) (*Operation, error } +// method id "compute.instances.startWithEncryptionKey": + +type InstancesStartWithEncryptionKeyCall struct { + s *Service + project string + zone string + instance string + instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// StartWithEncryptionKey: Starts an instance that was stopped using the +// using the instances().stop method. For more information, see Restart +// an instance. +func (r *InstancesService) StartWithEncryptionKey(project string, zone string, instance string, instancesstartwithencryptionkeyrequest *InstancesStartWithEncryptionKeyRequest) *InstancesStartWithEncryptionKeyCall { + c := &InstancesStartWithEncryptionKeyCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.zone = zone + c.instance = instance + c.instancesstartwithencryptionkeyrequest = instancesstartwithencryptionkeyrequest + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *InstancesStartWithEncryptionKeyCall) Fields(s ...googleapi.Field) *InstancesStartWithEncryptionKeyCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *InstancesStartWithEncryptionKeyCall) Context(ctx context.Context) *InstancesStartWithEncryptionKeyCall { + c.ctx_ = ctx + return c +} + +func (c *InstancesStartWithEncryptionKeyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancesstartwithencryptionkeyrequest) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "zone": c.zone, + "instance": c.instance, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.instances.startWithEncryptionKey" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *InstancesStartWithEncryptionKeyCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Starts an instance that was stopped using the using the instances().stop method. 
For more information, see Restart an instance.", + // "httpMethod": "POST", + // "id": "compute.instances.startWithEncryptionKey", + // "parameterOrder": [ + // "project", + // "zone", + // "instance" + // ], + // "parameters": { + // "instance": { + // "description": "Name of the instance resource to start.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "zone": { + // "description": "The name of the zone for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/zones/{zone}/instances/{instance}/startWithEncryptionKey", + // "request": { + // "$ref": "InstancesStartWithEncryptionKeyRequest" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.instances.stop": type InstancesStopCall struct { @@ -26804,21 +27833,20 @@ func (c *InstancesStopCall) Context(ctx context.Context) *InstancesStopCall { } func (c *InstancesStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "instance": c.instance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.instances.stop" call. @@ -26853,7 +27881,8 @@ func (c *InstancesStopCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -26949,23 +27978,22 @@ func (c *LicensesGetCall) Context(ctx context.Context) *LicensesGetCall { } func (c *LicensesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "license": c.license, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.licenses.get" call. @@ -27000,7 +28028,8 @@ func (c *LicensesGetCall) Do(opts ...googleapi.CallOption) (*License, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27077,12 +28106,11 @@ func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggreg // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -27139,22 +28167,21 @@ func (c *MachineTypesAggregatedListCall) Context(ctx context.Context) *MachineTy } func (c *MachineTypesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.machineTypes.aggregatedList" call. @@ -27189,7 +28216,8 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27202,7 +28230,7 @@ func (c *MachineTypesAggregatedListCall) Do(opts ...googleapi.CallOption) (*Mach // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -27312,24 +28340,23 @@ func (c *MachineTypesGetCall) Context(ctx context.Context) *MachineTypesGetCall } func (c *MachineTypesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "machineType": c.machineType, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.machineTypes.get" call. @@ -27364,7 +28391,8 @@ func (c *MachineTypesGetCall) Do(opts ...googleapi.CallOption) (*MachineType, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27452,12 +28480,11 @@ func (r *MachineTypesService) List(project string, zone string) *MachineTypesLis // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -27514,23 +28541,22 @@ func (c *MachineTypesListCall) Context(ctx context.Context) *MachineTypesListCal } func (c *MachineTypesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.machineTypes.list" call. @@ -27565,7 +28591,8 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27579,7 +28606,7 @@ func (c *MachineTypesListCall) Do(opts ...googleapi.CallOption) (*MachineTypeLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -27682,20 +28709,19 @@ func (c *NetworksDeleteCall) Context(ctx context.Context) *NetworksDeleteCall { } func (c *NetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "network": c.network, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.networks.delete" call. @@ -27730,7 +28756,8 @@ func (c *NetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27818,23 +28845,22 @@ func (c *NetworksGetCall) Context(ctx context.Context) *NetworksGetCall { } func (c *NetworksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "network": c.network, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.networks.get" call. @@ -27869,7 +28895,8 @@ func (c *NetworksGetCall) Do(opts ...googleapi.CallOption) (*Network, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -27947,25 +28974,23 @@ func (c *NetworksInsertCall) Context(ctx context.Context) *NetworksInsertCall { } func (c *NetworksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.network) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.networks.insert" call. 
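The mechanical change repeated through these hunks is that every doRequest now builds its headers into a single http.Header up front and hands the send off to gensupport.SendRequest instead of branching on c.ctx_ inline. A minimal sketch of an equivalent helper follows, under the assumption that it only needs to reproduce the removed ctxhttp/client.Do branch; the real gensupport.SendRequest may do more than this.

package gensupportsketch

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// sendRequest mirrors the (ctx, client, req) call shape of
// gensupport.SendRequest as used in the hunks above: with no context it
// falls back to a plain client.Do, otherwise it dispatches through ctxhttp
// so the request is aborted when the context is cancelled.
func sendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx == nil {
		return client.Do(req)
	}
	return ctxhttp.Do(ctx, client, req)
}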
@@ -28000,7 +29025,8 @@ func (c *NetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28072,12 +29098,11 @@ func (r *NetworksService) List(project string) *NetworksListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -28134,22 +29159,21 @@ func (c *NetworksListCall) Context(ctx context.Context) *NetworksListCall { } func (c *NetworksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.networks.list" call. @@ -28184,7 +29208,8 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28197,7 +29222,7 @@ func (c *NetworksListCall) Do(opts ...googleapi.CallOption) (*NetworkList, error // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -28302,22 +29327,21 @@ func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { } func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.projects.get" call. 
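For orientation, here is a hedged caller-side sketch of the builder/Do pattern these generated calls expose. The import path google.golang.org/api/compute/v1 and the compute.New constructor are assumptions based on the package this file belongs to, the project ID is a placeholder, and a real call needs an OAuth2-authenticated *http.Client rather than http.DefaultClient.

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// Placeholder client; real calls require an authenticated client,
	// for example one built with golang.org/x/oauth2/google.
	var client *http.Client = http.DefaultClient

	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// "my-project" is a placeholder project ID.
	proj, err := svc.Projects.Get("my-project").Do()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(proj.Name)
}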
@@ -28352,7 +29376,8 @@ func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28420,25 +29445,23 @@ func (c *ProjectsMoveDiskCall) Context(ctx context.Context) *ProjectsMoveDiskCal } func (c *ProjectsMoveDiskCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.diskmoverequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveDisk") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.projects.moveDisk" call. @@ -28473,7 +29496,8 @@ func (c *ProjectsMoveDiskCall) Do(opts ...googleapi.CallOption) (*Operation, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28544,25 +29568,23 @@ func (c *ProjectsMoveInstanceCall) Context(ctx context.Context) *ProjectsMoveIns } func (c *ProjectsMoveInstanceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancemoverequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/moveInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.projects.moveInstance" call. 
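The moveInstance call refactored above takes a JSON request body rather than URL parameters. A hedged usage sketch follows; the InstanceMoveRequest field names (DestinationZone, TargetInstance) and the zone and instance URLs are assumptions not shown in this hunk, and svc is a *compute.Service built as in the earlier sketch.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// moveInstance sketches the compute.projects.moveInstance call and prints
// the name of the returned Operation.
func moveInstance(svc *compute.Service, project string) error {
	req := &compute.InstanceMoveRequest{
		DestinationZone: "zones/us-central1-f",                        // assumed field
		TargetInstance:  "zones/us-central1-a/instances/my-instance",  // assumed field
	}
	op, err := svc.Projects.MoveInstance(project, req).Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name)
	return nil
}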
@@ -28597,7 +29619,8 @@ func (c *ProjectsMoveInstanceCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28669,25 +29692,23 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Context(ctx context.Context) *Pr } func (c *ProjectsSetCommonInstanceMetadataCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.projects.setCommonInstanceMetadata" call. @@ -28722,7 +29743,8 @@ func (c *ProjectsSetCommonInstanceMetadataCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28796,25 +29818,23 @@ func (c *ProjectsSetUsageExportBucketCall) Context(ctx context.Context) *Project } func (c *ProjectsSetUsageExportBucketCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.projects.setUsageExportBucket" call. 
@@ -28849,7 +29869,8 @@ func (c *ProjectsSetUsageExportBucketCall) Do(opts ...googleapi.CallOption) (*Op HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -28925,21 +29946,20 @@ func (c *RegionOperationsDeleteCall) Context(ctx context.Context) *RegionOperati } func (c *RegionOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.regionOperations.delete" call. @@ -29044,24 +30064,23 @@ func (c *RegionOperationsGetCall) Context(ctx context.Context) *RegionOperations } func (c *RegionOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.regionOperations.get" call. @@ -29096,7 +30115,8 @@ func (c *RegionOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -29184,12 +30204,11 @@ func (r *RegionOperationsService) List(project string, region string) *RegionOpe // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
// // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -29246,23 +30265,22 @@ func (c *RegionOperationsListCall) Context(ctx context.Context) *RegionOperation } func (c *RegionOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.regionOperations.list" call. @@ -29297,7 +30315,8 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -29311,7 +30330,7 @@ func (c *RegionOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationL // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -29426,23 +30445,22 @@ func (c *RegionsGetCall) Context(ctx context.Context) *RegionsGetCall { } func (c *RegionsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.regions.get" call. @@ -29477,7 +30495,8 @@ func (c *RegionsGetCall) Do(opts ...googleapi.CallOption) (*Region, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -29555,12 +30574,11 @@ func (r *RegionsService) List(project string) *RegionsListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -29617,22 +30635,21 @@ func (c *RegionsListCall) Context(ctx context.Context) *RegionsListCall { } func (c *RegionsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.regions.list" call. @@ -29667,7 +30684,8 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -29680,7 +30698,7 @@ func (c *RegionsListCall) Do(opts ...googleapi.CallOption) (*RegionList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -29740,6 +30758,1313 @@ func (c *RegionsListCall) Pages(ctx context.Context, f func(*RegionList) error) } } +// method id "compute.routers.aggregatedList": + +type RoutersAggregatedListCall struct { + s *Service + project string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// AggregatedList: Retrieves an aggregated list of routers. +func (r *RoutersService) AggregatedList(project string) *RoutersAggregatedListCall { + c := &RoutersAggregatedListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *RoutersAggregatedListCall) Filter(filter string) *RoutersAggregatedListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. 
If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutersAggregatedListCall) MaxResults(maxResults int64) *RoutersAggregatedListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutersAggregatedListCall) PageToken(pageToken string) *RoutersAggregatedListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersAggregatedListCall) Fields(s ...googleapi.Field) *RoutersAggregatedListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersAggregatedListCall) IfNoneMatch(entityTag string) *RoutersAggregatedListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersAggregatedListCall) Context(ctx context.Context) *RoutersAggregatedListCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/routers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.aggregatedList" call. +// Exactly one of *RouterAggregatedList or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterAggregatedList.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersAggregatedListCall) Do(opts ...googleapi.CallOption) (*RouterAggregatedList, error) { + gensupport.SetOptions(c.urlParams_, opts...) 
+ res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RouterAggregatedList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves an aggregated list of routers.", + // "httpMethod": "GET", + // "id": "compute.routers.aggregatedList", + // "parameterOrder": [ + // "project" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. 
Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/aggregated/routers", + // "response": { + // "$ref": "RouterAggregatedList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutersAggregatedListCall) Pages(ctx context.Context, f func(*RouterAggregatedList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.routers.delete": + +type RoutersDeleteCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Delete: Deletes the specified Router resource. +func (r *RoutersService) Delete(project string, region string, router string) *RoutersDeleteCall { + c := &RoutersDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersDeleteCall) Fields(s ...googleapi.Field) *RoutersDeleteCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersDeleteCall) Context(ctx context.Context) *RoutersDeleteCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.delete" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
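The aggregated-list call added above ships with a Pages helper that keeps calling Do and feeding each page to the supplied function until NextPageToken is empty. A hedged sketch of walking every page follows; the Items map and the Routers slice on RouterAggregatedList are assumptions drawn by analogy with the other aggregated-list types in this file.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// listAllRouters walks every page of the aggregated router list via Pages
// and prints each router under its scope key.
func listAllRouters(ctx context.Context, svc *compute.Service, project string) error {
	return svc.Routers.AggregatedList(project).Pages(ctx, func(page *compute.RouterAggregatedList) error {
		for scope, scoped := range page.Items {
			for _, r := range scoped.Routers {
				fmt.Printf("%s: %s\n", scope, r.Name)
			}
		}
		return nil
	})
}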
+func (c *RoutersDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Deletes the specified Router resource.", + // "httpMethod": "DELETE", + // "id": "compute.routers.delete", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to delete.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{router}", + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routers.get": + +type RoutersGetCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// Get: Returns the specified Router resource. Get a list of available +// routers by making a list() request. +func (r *RoutersService) Get(project string, region string, router string) *RoutersGetCall { + c := &RoutersGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersGetCall) Fields(s ...googleapi.Field) *RoutersGetCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetCall) IfNoneMatch(entityTag string) *RoutersGetCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
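The Context setter documented here is the cancellation hook for every pending HTTP request in these calls. A small sketch, assuming the golang.org/x/net/context package that this generated code itself uses, and placeholder project, region and router names:

package main

import (
	"time"

	"golang.org/x/net/context"
	compute "google.golang.org/api/compute/v1"
)

// getRouterWithTimeout aborts the underlying HTTP request if it takes
// longer than 30 seconds, because the context attached via Context is
// cancelled at that point.
func getRouterWithTimeout(svc *compute.Service) (*compute.Router, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	return svc.Routers.Get("my-project", "us-central1", "my-router").Context(ctx).Do()
}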
+func (c *RoutersGetCall) Context(ctx context.Context) *RoutersGetCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.get" call. +// Exactly one of *Router or error will be non-nil. Any non-2xx status +// code is an error. Response headers are in either +// *Router.ServerResponse.Header or (if a response was returned at all) +// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to +// check whether the returned error was because http.StatusNotModified +// was returned. +func (c *RoutersGetCall) Do(opts ...googleapi.CallOption) (*Router, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Router{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Returns the specified Router resource. 
Get a list of available routers by making a list() request.", + // "httpMethod": "GET", + // "id": "compute.routers.get", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to return.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{router}", + // "response": { + // "$ref": "Router" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routers.getRouterStatus": + +type RoutersGetRouterStatusCall struct { + s *Service + project string + region string + router string + urlParams_ gensupport.URLParams + ifNoneMatch_ string + ctx_ context.Context +} + +// GetRouterStatus: Retrieves runtime information of the specified +// router. +func (r *RoutersService) GetRouterStatus(project string, region string, router string) *RoutersGetRouterStatusCall { + c := &RoutersGetRouterStatusCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersGetRouterStatusCall) Fields(s ...googleapi.Field) *RoutersGetRouterStatusCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. +func (c *RoutersGetRouterStatusCall) IfNoneMatch(entityTag string) *RoutersGetRouterStatusCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersGetRouterStatusCall) Context(ctx context.Context) *RoutersGetRouterStatusCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersGetRouterStatusCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}/getRouterStatus") + urls += "?" 
+ c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.getRouterStatus" call. +// Exactly one of *RouterStatusResponse or error will be non-nil. Any +// non-2xx status code is an error. Response headers are in either +// *RouterStatusResponse.ServerResponse.Header or (if a response was +// returned at all) in error.(*googleapi.Error).Header. Use +// googleapi.IsNotModified to check whether the returned error was +// because http.StatusNotModified was returned. +func (c *RoutersGetRouterStatusCall) Do(opts ...googleapi.CallOption) (*RouterStatusResponse, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RouterStatusResponse{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves runtime information of the specified router.", + // "httpMethod": "GET", + // "id": "compute.routers.getRouterStatus", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to query.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{router}/getRouterStatus", + // "response": { + // "$ref": "RouterStatusResponse" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// method id "compute.routers.insert": + +type RoutersInsertCall struct { + s *Service + project string + region string + router *Router + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Insert: Creates a Router resource in the specified project and region +// using the data included in the request. +func (r *RoutersService) Insert(project string, region string, router *Router) *RoutersInsertCall { + c := &RoutersInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. 
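Below is a hedged sketch of driving the insert call defined here. The Router and RouterBgp field names (Network, Bgp, Asn) are assumptions not shown in this hunk, and the router name and network URL are placeholders.

package main

import (
	"fmt"

	compute "google.golang.org/api/compute/v1"
)

// insertRouter creates a Router in the given project and region and prints
// the name of the returned Operation.
func insertRouter(svc *compute.Service, project, region string) error {
	router := &compute.Router{
		Name:    "my-router",                    // placeholder
		Network: "global/networks/default",      // assumed field
		Bgp:     &compute.RouterBgp{Asn: 64514}, // assumed field and type
	}
	op, err := svc.Routers.Insert(project, region, router).Do()
	if err != nil {
		return err
	}
	fmt.Println("operation:", op.Name)
	return nil
}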
+func (c *RoutersInsertCall) Fields(s ...googleapi.Field) *RoutersInsertCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersInsertCall) Context(ctx context.Context) *RoutersInsertCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.insert" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Creates a Router resource in the specified project and region using the data included in the request.", + // "httpMethod": "POST", + // "id": "compute.routers.insert", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers", + // "request": { + // "$ref": "Router" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routers.list": + +type RoutersListCall struct { + s *Service + project string + region string + urlParams_ gensupport.URLParams + 
ifNoneMatch_ string + ctx_ context.Context +} + +// List: Retrieves a list of Router resources available to the specified +// project. +func (r *RoutersService) List(project string, region string) *RoutersListCall { + c := &RoutersListCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + return c +} + +// Filter sets the optional parameter "filter": Sets a filter expression +// for filtering listed resources, in the form filter={expression}. Your +// {expression} must be in the format: field_name comparison_string +// literal_string. +// +// The field_name is the name of the field you want to compare. Only +// atomic field types are supported (string, number, boolean). The +// comparison_string must be either eq (equals) or ne (not equals). The +// literal_string is the string value to filter to. The literal value +// must be valid for the type of field you are filtering by (string, +// number, boolean). For string fields, the literal value is interpreted +// as a regular expression using RE2 syntax. The literal value must +// match the entire field. +// +// For example, to filter for instances that do not have a name of +// example-instance, you would use filter=name ne +// example-instance. +// +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. +// +// The Beta API also supports filtering on multiple expressions by +// providing each separate expression within parentheses. For example, +// (scheduling.automaticRestart eq true) (zone eq us-central1-f). +// Multiple expressions are treated as AND expressions, meaning that +// resources must match all expressions to pass the filters. +func (c *RoutersListCall) Filter(filter string) *RoutersListCall { + c.urlParams_.Set("filter", filter) + return c +} + +// MaxResults sets the optional parameter "maxResults": The maximum +// number of results per page that should be returned. If the number of +// available results is larger than maxResults, Compute Engine returns a +// nextPageToken that can be used to get the next page of results in +// subsequent list requests. +func (c *RoutersListCall) MaxResults(maxResults int64) *RoutersListCall { + c.urlParams_.Set("maxResults", fmt.Sprint(maxResults)) + return c +} + +// PageToken sets the optional parameter "pageToken": Specifies a page +// token to use. Set pageToken to the nextPageToken returned by a +// previous list request to get the next page of results. +func (c *RoutersListCall) PageToken(pageToken string) *RoutersListCall { + c.urlParams_.Set("pageToken", pageToken) + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersListCall) Fields(s ...googleapi.Field) *RoutersListCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// IfNoneMatch sets the optional parameter which makes the operation +// fail if the object's ETag matches the given value. This is useful for +// getting updates only after the object has changed since the last +// request. Use googleapi.IsNotModified to check whether the response +// error from Do is the result of In-None-Match. 
+func (c *RoutersListCall) IfNoneMatch(entityTag string) *RoutersListCall { + c.ifNoneMatch_ = entityTag + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersListCall) Context(ctx context.Context) *RoutersListCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } + var body io.Reader = nil + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.list" call. +// Exactly one of *RouterList or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *RouterList.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersListCall) Do(opts ...googleapi.CallOption) (*RouterList, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &RouterList{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Retrieves a list of Router resources available to the specified project.", + // "httpMethod": "GET", + // "id": "compute.routers.list", + // "parameterOrder": [ + // "project", + // "region" + // ], + // "parameters": { + // "filter": { + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "location": "query", + // "type": "string" + // }, + // "maxResults": { + // "default": "500", + // "description": "The maximum number of results per page that should be returned. If the number of available results is larger than maxResults, Compute Engine returns a nextPageToken that can be used to get the next page of results in subsequent list requests.", + // "format": "uint32", + // "location": "query", + // "maximum": "500", + // "minimum": "0", + // "type": "integer" + // }, + // "pageToken": { + // "description": "Specifies a page token to use. Set pageToken to the nextPageToken returned by a previous list request to get the next page of results.", + // "location": "query", + // "type": "string" + // }, + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers", + // "response": { + // "$ref": "RouterList" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute", + // "https://www.googleapis.com/auth/compute.readonly" + // ] + // } + +} + +// Pages invokes f for each page of results. +// A non-nil error returned from f will halt the iteration. +// The provided context supersedes any context provided to the Context method. +func (c *RoutersListCall) Pages(ctx context.Context, f func(*RouterList) error) error { + c.ctx_ = ctx + defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point + for { + x, err := c.Do() + if err != nil { + return err + } + if err := f(x); err != nil { + return err + } + if x.NextPageToken == "" { + return nil + } + c.PageToken(x.NextPageToken) + } +} + +// method id "compute.routers.patch": + +type RoutersPatchCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Patch: Updates the entire content of the Router resource. This method +// supports patch semantics. +func (r *RoutersService) Patch(project string, region string, router string, router2 *Router) *RoutersPatchCall { + c := &RoutersPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + c.router2 = router2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersPatchCall) Fields(s ...googleapi.Field) *RoutersPatchCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. 
Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersPatchCall) Context(ctx context.Context) *RoutersPatchCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.patch" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *RoutersPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the Router resource. 
This method supports patch semantics.", + // "httpMethod": "PATCH", + // "id": "compute.routers.patch", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + +// method id "compute.routers.update": + +type RoutersUpdateCall struct { + s *Service + project string + region string + router string + router2 *Router + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// Update: Updates the entire content of the Router resource. +func (r *RoutersService) Update(project string, region string, router string, router2 *Router) *RoutersUpdateCall { + c := &RoutersUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.region = region + c.router = router + c.router2 = router2 + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *RoutersUpdateCall) Fields(s ...googleapi.Field) *RoutersUpdateCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. +func (c *RoutersUpdateCall) Context(ctx context.Context) *RoutersUpdateCall { + c.ctx_ = ctx + return c +} + +func (c *RoutersUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.router2) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/routers/{router}") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "region": c.region, + "router": c.router, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.routers.update" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. 
+func (c *RoutersUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Updates the entire content of the Router resource.", + // "httpMethod": "PUT", + // "id": "compute.routers.update", + // "parameterOrder": [ + // "project", + // "region", + // "router" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "region": { + // "description": "Name of the region for this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // }, + // "router": { + // "description": "Name of the Router resource to update.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/regions/{region}/routers/{router}", + // "request": { + // "$ref": "Router" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.routes.delete": type RoutesDeleteCall struct { @@ -29776,20 +32101,19 @@ func (c *RoutesDeleteCall) Context(ctx context.Context) *RoutesDeleteCall { } func (c *RoutesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "route": c.route, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.routes.delete" call. 
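For orientation, a minimal usage sketch of the generated Routers list call shown above, using the Filter, MaxResults, and Pages plumbing documented in this file. The project ID, region, filter value, and printed fields are placeholders, and Application Default Credentials via golang.org/x/oauth2/google are assumed; this is an illustrative sketch, not part of the patch.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
)

func main() {
	ctx := context.Background()

	// Application Default Credentials; the cloud-platform scope covers compute.
	httpClient, err := google.DefaultClient(ctx, compute.CloudPlatformScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project/region; the filter uses the documented
	// "field_name comparison_string literal_string" form with RE2 on name.
	call := svc.Routers.List("my-project", "us-central1").
		Filter("name eq example-router.*").
		MaxResults(50)

	// Pages calls Do repeatedly, handing each RouterList to the callback
	// and following NextPageToken until it is empty.
	if err := call.Pages(ctx, func(page *compute.RouterList) error {
		for _, r := range page.Items {
			fmt.Println(r.Name, r.Network)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}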
@@ -29824,7 +32148,8 @@ func (c *RoutesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -29912,23 +32237,22 @@ func (c *RoutesGetCall) Context(ctx context.Context) *RoutesGetCall { } func (c *RoutesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "route": c.route, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.routes.get" call. @@ -29963,7 +32287,8 @@ func (c *RoutesGetCall) Do(opts ...googleapi.CallOption) (*Route, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30041,25 +32366,23 @@ func (c *RoutesInsertCall) Context(ctx context.Context) *RoutesInsertCall { } func (c *RoutesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.route) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.routes.insert" call. @@ -30094,7 +32417,8 @@ func (c *RoutesInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30166,12 +32490,11 @@ func (r *RoutesService) List(project string) *RoutesListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. 
+// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -30228,22 +32551,21 @@ func (c *RoutesListCall) Context(ctx context.Context) *RoutesListCall { } func (c *RoutesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.routes.list" call. @@ -30278,7 +32600,8 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30291,7 +32614,7 @@ func (c *RoutesListCall) Do(opts ...googleapi.CallOption) (*RouteList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -30393,20 +32716,19 @@ func (c *SnapshotsDeleteCall) Context(ctx context.Context) *SnapshotsDeleteCall } func (c *SnapshotsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "snapshot": c.snapshot, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.snapshots.delete" call. @@ -30441,7 +32763,8 @@ func (c *SnapshotsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30529,23 +32852,22 @@ func (c *SnapshotsGetCall) Context(ctx context.Context) *SnapshotsGetCall { } func (c *SnapshotsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "snapshot": c.snapshot, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.snapshots.get" call. @@ -30580,7 +32902,8 @@ func (c *SnapshotsGetCall) Do(opts ...googleapi.CallOption) (*Snapshot, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30658,12 +32981,11 @@ func (r *SnapshotsService) List(project string) *SnapshotsListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -30720,22 +33042,21 @@ func (c *SnapshotsListCall) Context(ctx context.Context) *SnapshotsListCall { } func (c *SnapshotsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.snapshots.list" call. @@ -30770,7 +33091,8 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -30783,7 +33105,7 @@ func (c *SnapshotsListCall) Do(opts ...googleapi.CallOption) (*SnapshotList, err // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -30878,20 +33200,19 @@ func (c *SslCertificatesDeleteCall) Context(ctx context.Context) *SslCertificate } func (c *SslCertificatesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "sslCertificate": c.sslCertificate, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.sslCertificates.delete" call. @@ -30926,7 +33247,8 @@ func (c *SslCertificatesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31013,23 +33335,22 @@ func (c *SslCertificatesGetCall) Context(ctx context.Context) *SslCertificatesGe } func (c *SslCertificatesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates/{sslCertificate}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "sslCertificate": c.sslCertificate, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.sslCertificates.get" call. @@ -31064,7 +33385,8 @@ func (c *SslCertificatesGetCall) Do(opts ...googleapi.CallOption) (*SslCertifica HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31141,25 +33463,23 @@ func (c *SslCertificatesInsertCall) Context(ctx context.Context) *SslCertificate } func (c *SslCertificatesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.sslcertificate) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.sslCertificates.insert" call. 
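The If-None-Match handling refactored in the Get calls above enables conditional reads: passing a previously seen ETag makes a 304 surface as an error that googleapi.IsNotModified recognizes. A minimal sketch against the Snapshots.Get call, assuming placeholder project and snapshot names and an externally cached ETag; not part of the patch.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getSnapshotIfChanged re-fetches a snapshot only when its ETag differs
// from the cached value; a 304 from the server is reported as "unchanged".
func getSnapshotIfChanged(ctx context.Context, svc *compute.Service, project, name, cachedETag string) (*compute.Snapshot, bool, error) {
	snap, err := svc.Snapshots.Get(project, name).
		IfNoneMatch(cachedETag).
		Context(ctx).
		Do()
	if googleapi.IsNotModified(err) {
		return nil, false, nil // cached copy is still current
	}
	if err != nil {
		return nil, false, err
	}
	return snap, true, nil
}

func main() {
	ctx := context.Background()
	httpClient, err := google.DefaultClient(ctx, compute.ComputeReadonlyScope)
	if err != nil {
		log.Fatal(err)
	}
	svc, err := compute.New(httpClient)
	if err != nil {
		log.Fatal(err)
	}

	// Placeholder project, snapshot name, and (empty) cached ETag.
	snap, changed, err := getSnapshotIfChanged(ctx, svc, "my-project", "example-snapshot", "")
	if err != nil {
		log.Fatal(err)
	}
	if changed {
		fmt.Println(snap.Name, snap.Status)
	}
}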
@@ -31194,7 +33514,8 @@ func (c *SslCertificatesInsertCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31265,12 +33586,11 @@ func (r *SslCertificatesService) List(project string) *SslCertificatesListCall { // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -31327,22 +33647,21 @@ func (c *SslCertificatesListCall) Context(ctx context.Context) *SslCertificatesL } func (c *SslCertificatesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/sslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.sslCertificates.list" call. @@ -31377,7 +33696,8 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31390,7 +33710,7 @@ func (c *SslCertificatesListCall) Do(opts ...googleapi.CallOption) (*SslCertific // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -31485,12 +33805,11 @@ func (r *SubnetworksService) AggregatedList(project string) *SubnetworksAggregat // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -31547,22 +33866,21 @@ func (c *SubnetworksAggregatedListCall) Context(ctx context.Context) *Subnetwork } func (c *SubnetworksAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/subnetworks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.subnetworks.aggregatedList" call. @@ -31597,7 +33915,8 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31610,7 +33929,7 @@ func (c *SubnetworksAggregatedListCall) Do(opts ...googleapi.CallOption) (*Subne // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -31707,21 +34026,20 @@ func (c *SubnetworksDeleteCall) Context(ctx context.Context) *SubnetworksDeleteC } func (c *SubnetworksDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "subnetwork": c.subnetwork, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.subnetworks.delete" call. @@ -31756,7 +34074,8 @@ func (c *SubnetworksDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -31817,7 +34136,7 @@ type SubnetworksGetCall struct { } // Get: Returns the specified subnetwork. Get a list of available -// subnetworks by making a list() request. +// subnetworks list() request. func (r *SubnetworksService) Get(project string, region string, subnetwork string) *SubnetworksGetCall { c := &SubnetworksGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project @@ -31853,24 +34172,23 @@ func (c *SubnetworksGetCall) Context(ctx context.Context) *SubnetworksGetCall { } func (c *SubnetworksGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks/{subnetwork}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "subnetwork": c.subnetwork, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.subnetworks.get" call. @@ -31905,12 +34223,13 @@ func (c *SubnetworksGetCall) Do(opts ...googleapi.CallOption) (*Subnetwork, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil // { - // "description": "Returns the specified subnetwork. Get a list of available subnetworks by making a list() request.", + // "description": "Returns the specified subnetwork. Get a list of available subnetworks list() request.", // "httpMethod": "GET", // "id": "compute.subnetworks.get", // "parameterOrder": [ @@ -31992,26 +34311,24 @@ func (c *SubnetworksInsertCall) Context(ctx context.Context) *SubnetworksInsertC } func (c *SubnetworksInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.subnetwork) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.subnetworks.insert" call. @@ -32046,7 +34363,8 @@ func (c *SubnetworksInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32127,12 +34445,11 @@ func (r *SubnetworksService) List(project string, region string) *SubnetworksLis // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. 
// // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -32189,23 +34506,22 @@ func (c *SubnetworksListCall) Context(ctx context.Context) *SubnetworksListCall } func (c *SubnetworksListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/subnetworks") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.subnetworks.list" call. @@ -32240,7 +34556,8 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32254,7 +34571,7 @@ func (c *SubnetworksListCall) Do(opts ...googleapi.CallOption) (*SubnetworkList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -32357,20 +34674,19 @@ func (c *TargetHttpProxiesDeleteCall) Context(ctx context.Context) *TargetHttpPr } func (c *TargetHttpProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpProxy": c.targetHttpProxy, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpProxies.delete" call. @@ -32405,7 +34721,8 @@ func (c *TargetHttpProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32493,23 +34810,22 @@ func (c *TargetHttpProxiesGetCall) Context(ctx context.Context) *TargetHttpProxi } func (c *TargetHttpProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpProxy": c.targetHttpProxy, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpProxies.get" call. 
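Every doRequest hunk above replaces the inline User-Agent / If-None-Match header handling and the c.ctx_ nil-check around ctxhttp.Do with a single http.Header handed to gensupport.SendRequest. The sketch below shows what an equivalent helper would look like; it is an illustration of the pattern inferred from the code being deleted, not the actual google.golang.org/api/gensupport implementation, and the package and function names here are assumptions.

package gensupportsketch

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/context/ctxhttp"
)

// sendRequest mirrors what the generated code used to do inline: honour the
// context set via Context() when present, otherwise fall back to a plain
// client.Do. The real gensupport.SendRequest may do more (cancellation
// plumbing, retries); this sketch only reproduces the behaviour visible in
// the removed lines.
func sendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx != nil {
		return ctxhttp.Do(ctx, client, req)
	}
	return client.Do(req)
}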
@@ -32544,7 +34860,8 @@ func (c *TargetHttpProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHttp HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32622,25 +34939,23 @@ func (c *TargetHttpProxiesInsertCall) Context(ctx context.Context) *TargetHttpPr } func (c *TargetHttpProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpProxies.insert" call. @@ -32675,7 +34990,8 @@ func (c *TargetHttpProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32747,12 +35063,11 @@ func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCa // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -32809,22 +35124,21 @@ func (c *TargetHttpProxiesListCall) Context(ctx context.Context) *TargetHttpProx } func (c *TargetHttpProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpProxies.list" call. @@ -32859,7 +35173,8 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -32872,7 +35187,7 @@ func (c *TargetHttpProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHtt // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -32970,26 +35285,24 @@ func (c *TargetHttpProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHtt } func (c *TargetHttpProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpProxy": c.targetHttpProxy, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpProxies.setUrlMap" call. @@ -33024,7 +35337,8 @@ func (c *TargetHttpProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Oper HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33102,20 +35416,19 @@ func (c *TargetHttpsProxiesDeleteCall) Context(ctx context.Context) *TargetHttps } func (c *TargetHttpsProxiesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpsProxy": c.targetHttpsProxy, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.delete" call. 
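For context on how these generated calls are consumed, the fragment below drives the compute.targetHttpsProxies.delete call rewritten above from client code. The helper name and the assumption of an already-authenticated *http.Client are illustrative only and are not part of this patch.

package computeexamples

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

// deleteTargetHTTPSProxy shows the caller side of the generated
// TargetHttpsProxies.Delete call; httpClient is assumed to already carry
// OAuth2 credentials (how it is built is outside this patch).
func deleteTargetHTTPSProxy(httpClient *http.Client, project, proxyName string) error {
	svc, err := compute.New(httpClient)
	if err != nil {
		return err
	}
	op, err := svc.TargetHttpsProxies.Delete(project, proxyName).Do()
	if err != nil {
		return fmt.Errorf("deleting target HTTPS proxy %q: %v", proxyName, err)
	}
	log.Printf("[DEBUG] delete issued, operation: %s", op.Name)
	return nil
}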
@@ -33150,7 +35463,8 @@ func (c *TargetHttpsProxiesDeleteCall) Do(opts ...googleapi.CallOption) (*Operat HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33237,23 +35551,22 @@ func (c *TargetHttpsProxiesGetCall) Context(ctx context.Context) *TargetHttpsPro } func (c *TargetHttpsProxiesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies/{targetHttpsProxy}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpsProxy": c.targetHttpsProxy, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.get" call. @@ -33288,7 +35601,8 @@ func (c *TargetHttpsProxiesGetCall) Do(opts ...googleapi.CallOption) (*TargetHtt HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33365,25 +35679,23 @@ func (c *TargetHttpsProxiesInsertCall) Context(ctx context.Context) *TargetHttps } func (c *TargetHttpsProxiesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxy) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.insert" call. @@ -33418,7 +35730,8 @@ func (c *TargetHttpsProxiesInsertCall) Do(opts ...googleapi.CallOption) (*Operat HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33489,12 +35802,11 @@ func (r *TargetHttpsProxiesService) List(project string) *TargetHttpsProxiesList // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. 
In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -33551,22 +35863,21 @@ func (c *TargetHttpsProxiesListCall) Context(ctx context.Context) *TargetHttpsPr } func (c *TargetHttpsProxiesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpsProxies") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.list" call. @@ -33601,7 +35912,8 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33614,7 +35926,7 @@ func (c *TargetHttpsProxiesListCall) Do(opts ...googleapi.CallOption) (*TargetHt // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -33711,26 +36023,24 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Context(ctx context.Context) } func (c *TargetHttpsProxiesSetSslCertificatesCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpsproxiessetsslcertificatesrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setSslCertificates") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpsProxy": c.targetHttpsProxy, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.setSslCertificates" call. 
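The Do hunks consistently change json.NewDecoder(res.Body).Decode(&ret) into target := &ret followed by Decode(target); the decoded value is identical either way, the extra variable simply names the decode target explicitly. A self-contained sketch of that pattern (the type and JSON here are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

type operation struct {
	Name   string `json:"name"`
	Status string `json:"status"`
}

func main() {
	body := strings.NewReader(`{"name":"operation-123","status":"DONE"}`)

	ret := &operation{}
	target := &ret // pointer to the pointer, matching the shape used in the generated Do methods
	if err := json.NewDecoder(body).Decode(target); err != nil {
		panic(err)
	}
	fmt.Println(ret.Name, ret.Status) // operation-123 DONE
}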
@@ -33765,7 +36075,8 @@ func (c *TargetHttpsProxiesSetSslCertificatesCall) Do(opts ...googleapi.CallOpti HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33845,26 +36156,24 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Context(ctx context.Context) *TargetHt } func (c *TargetHttpsProxiesSetUrlMapCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpsProxies/{targetHttpsProxy}/setUrlMap") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "targetHttpsProxy": c.targetHttpsProxy, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetHttpsProxies.setUrlMap" call. @@ -33899,7 +36208,8 @@ func (c *TargetHttpsProxiesSetUrlMapCall) Do(opts ...googleapi.CallOption) (*Ope HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -33978,12 +36288,11 @@ func (r *TargetInstancesService) AggregatedList(project string) *TargetInstances // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -34040,22 +36349,21 @@ func (c *TargetInstancesAggregatedListCall) Context(ctx context.Context) *Target } func (c *TargetInstancesAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetInstances.aggregatedList" call. @@ -34090,7 +36398,8 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -34103,7 +36412,7 @@ func (c *TargetInstancesAggregatedListCall) Do(opts ...googleapi.CallOption) (*T // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -34201,21 +36510,20 @@ func (c *TargetInstancesDeleteCall) Context(ctx context.Context) *TargetInstance } func (c *TargetInstancesDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "targetInstance": c.targetInstance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetInstances.delete" call. @@ -34250,7 +36558,8 @@ func (c *TargetInstancesDeleteCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -34348,24 +36657,23 @@ func (c *TargetInstancesGetCall) Context(ctx context.Context) *TargetInstancesGe } func (c *TargetInstancesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "targetInstance": c.targetInstance, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetInstances.get" call. 
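The GET calls above keep their ifNoneMatch_ plumbing, now routed through reqHeaders. On the caller side this corresponds to the IfNoneMatch option on the generated Get/List calls together with googleapi.IsNotModified; the sketch below assumes an existing *compute.Service and that the caller saved an ETag from an earlier response (how it is stored is outside this sketch).

package computeexamples

import (
	compute "google.golang.org/api/compute/v1"
	"google.golang.org/api/googleapi"
)

// getTargetInstanceCached re-fetches a TargetInstance only if it changed
// since the cached copy identified by cachedETag.
func getTargetInstanceCached(svc *compute.Service, project, zone, name, cachedETag string) (*compute.TargetInstance, error) {
	ti, err := svc.TargetInstances.Get(project, zone, name).IfNoneMatch(cachedETag).Do()
	if googleapi.IsNotModified(err) {
		// 304 Not Modified: the cached object is still current.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return ti, nil
}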
@@ -34400,7 +36708,8 @@ func (c *TargetInstancesGetCall) Do(opts ...googleapi.CallOption) (*TargetInstan HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -34488,26 +36797,24 @@ func (c *TargetInstancesInsertCall) Context(ctx context.Context) *TargetInstance } func (c *TargetInstancesInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetInstances.insert" call. @@ -34542,7 +36849,8 @@ func (c *TargetInstancesInsertCall) Do(opts ...googleapi.CallOption) (*Operation HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -34624,12 +36932,11 @@ func (r *TargetInstancesService) List(project string, zone string) *TargetInstan // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -34686,23 +36993,22 @@ func (c *TargetInstancesListCall) Context(ctx context.Context) *TargetInstancesL } func (c *TargetInstancesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetInstances.list" call. @@ -34737,7 +37043,8 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -34751,7 +37058,7 @@ func (c *TargetInstancesListCall) Do(opts ...googleapi.CallOption) (*TargetInsta // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -34858,27 +37165,25 @@ func (c *TargetPoolsAddHealthCheckCall) Context(ctx context.Context) *TargetPool } func (c *TargetPoolsAddHealthCheckCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.addHealthCheck" call. @@ -34913,7 +37218,8 @@ func (c *TargetPoolsAddHealthCheckCall) Do(opts ...googleapi.CallOption) (*Opera HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35004,27 +37310,25 @@ func (c *TargetPoolsAddInstanceCall) Context(ctx context.Context) *TargetPoolsAd } func (c *TargetPoolsAddInstanceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.addInstance" call. 
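The filter parameter documented at length throughout these hunks maps to the Filter option on the generated list calls. Below is a small sketch of the field_name comparison_string literal_string grammar in use; the project, region, and expression values are placeholders.

package computeexamples

import (
	compute "google.golang.org/api/compute/v1"
)

// listBackupPools lists target pools whose name does not match a prefix.
// For string fields the literal is an RE2 regular expression that must match
// the entire field, as the descriptions above explain.
func listBackupPools(svc *compute.Service, project, region string) ([]*compute.TargetPool, error) {
	list, err := svc.TargetPools.List(project, region).
		Filter("name ne ^primary-.*").
		Do()
	if err != nil {
		return nil, err
	}
	return list.Items, nil
}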
@@ -35059,7 +37363,8 @@ func (c *TargetPoolsAddInstanceCall) Do(opts ...googleapi.CallOption) (*Operatio HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35146,12 +37451,11 @@ func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregat // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -35208,22 +37512,21 @@ func (c *TargetPoolsAggregatedListCall) Context(ctx context.Context) *TargetPool } func (c *TargetPoolsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.aggregatedList" call. @@ -35258,7 +37561,8 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35271,7 +37575,7 @@ func (c *TargetPoolsAggregatedListCall) Do(opts ...googleapi.CallOption) (*Targe // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -35369,21 +37673,20 @@ func (c *TargetPoolsDeleteCall) Context(ctx context.Context) *TargetPoolsDeleteC } func (c *TargetPoolsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.delete" call. 
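The aggregated and regional list calls above page their results through maxResults and pageToken/nextPageToken. A manual paging loop over compute.targetPools.aggregatedList might look as follows; the scoped-list field names are taken from the generated compute/v1 types and the page size is arbitrary.

package computeexamples

import (
	compute "google.golang.org/api/compute/v1"
)

// allTargetPools walks every page of an aggregated target-pool listing and
// flattens the per-region scoped lists into one slice.
func allTargetPools(svc *compute.Service, project string) ([]*compute.TargetPool, error) {
	var pools []*compute.TargetPool
	pageToken := ""
	for {
		call := svc.TargetPools.AggregatedList(project).MaxResults(500)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		agg, err := call.Do()
		if err != nil {
			return nil, err
		}
		for _, scoped := range agg.Items { // keyed by "regions/<region>"
			pools = append(pools, scoped.TargetPools...)
		}
		if agg.NextPageToken == "" {
			return pools, nil
		}
		pageToken = agg.NextPageToken
	}
}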
@@ -35418,7 +37721,8 @@ func (c *TargetPoolsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35516,24 +37820,23 @@ func (c *TargetPoolsGetCall) Context(ctx context.Context) *TargetPoolsGetCall { } func (c *TargetPoolsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.get" call. @@ -35568,7 +37871,8 @@ func (c *TargetPoolsGetCall) Do(opts ...googleapi.CallOption) (*TargetPool, erro HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35658,27 +37962,25 @@ func (c *TargetPoolsGetHealthCall) Context(ctx context.Context) *TargetPoolsGetH } func (c *TargetPoolsGetHealthCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.getHealth" call. 
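compute.targetPools.getHealth above is one of the POST calls whose body is serialised with googleapi.WithoutDataWrapper.JSONReader; callers pass an InstanceReference. A usage sketch, with the instance self link left as a parameter:

package computeexamples

import (
	compute "google.golang.org/api/compute/v1"
)

// poolInstanceHealth asks a target pool for the health of one of its
// instances; instanceURL must be the instance's fully-qualified self link.
func poolInstanceHealth(svc *compute.Service, project, region, pool, instanceURL string) (*compute.TargetPoolInstanceHealth, error) {
	ref := &compute.InstanceReference{Instance: instanceURL}
	return svc.TargetPools.GetHealth(project, region, pool, ref).Do()
}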
@@ -35713,7 +38015,8 @@ func (c *TargetPoolsGetHealthCall) Do(opts ...googleapi.CallOption) (*TargetPool HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35804,26 +38107,24 @@ func (c *TargetPoolsInsertCall) Context(ctx context.Context) *TargetPoolsInsertC } func (c *TargetPoolsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.insert" call. @@ -35858,7 +38159,8 @@ func (c *TargetPoolsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -35940,12 +38242,11 @@ func (r *TargetPoolsService) List(project string, region string) *TargetPoolsLis // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -36002,23 +38303,22 @@ func (c *TargetPoolsListCall) Context(ctx context.Context) *TargetPoolsListCall } func (c *TargetPoolsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.list" call. @@ -36053,7 +38353,8 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36067,7 +38368,7 @@ func (c *TargetPoolsListCall) Do(opts ...googleapi.CallOption) (*TargetPoolList, // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -36174,27 +38475,25 @@ func (c *TargetPoolsRemoveHealthCheckCall) Context(ctx context.Context) *TargetP } func (c *TargetPoolsRemoveHealthCheckCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.removeHealthCheck" call. @@ -36229,7 +38528,8 @@ func (c *TargetPoolsRemoveHealthCheckCall) Do(opts ...googleapi.CallOption) (*Op HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36320,27 +38620,25 @@ func (c *TargetPoolsRemoveInstanceCall) Context(ctx context.Context) *TargetPool } func (c *TargetPoolsRemoveInstanceCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.removeInstance" call. 
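Most hunks in this stretch of the vendored file apply the same mechanical refactor: request headers are collected into a reqHeaders value before the request is built, and the old "ctxhttp.Do if a context is set, client.Do otherwise" branch is replaced by a single gensupport.SendRequest call. A minimal sketch of that shape follows; sendRequest here stands in for the vendored gensupport.SendRequest helper, whose real implementation is not shown in this diff, so treat it as an assumption about intent rather than the library's code.

package sketch

import (
	"context"
	"io"
	"net/http"
)

// sendRequest stands in for gensupport.SendRequest as the generated code now
// uses it; the assumption is that it centralizes the "honor the context if
// one was set" decision that each doRequest previously made inline.
func sendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if ctx != nil {
		req = req.WithContext(ctx)
	}
	return client.Do(req)
}

// doRequest mirrors the refactored shape: build the header set first, attach
// it wholesale, then hand the request to the shared sender.
func doRequest(ctx context.Context, client *http.Client, userAgent, url string, body io.Reader) (*http.Response, error) {
	reqHeaders := make(http.Header)
	reqHeaders.Set("User-Agent", userAgent)
	if body != nil {
		reqHeaders.Set("Content-Type", "application/json")
	}
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header = reqHeaders
	return sendRequest(ctx, client, req)
}
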
@@ -36375,7 +38673,8 @@ func (c *TargetPoolsRemoveInstanceCall) Do(opts ...googleapi.CallOption) (*Opera HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36473,27 +38772,25 @@ func (c *TargetPoolsSetBackupCall) Context(ctx context.Context) *TargetPoolsSetB } func (c *TargetPoolsSetBackupCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetPool": c.targetPool, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetPools.setBackup" call. @@ -36528,7 +38825,8 @@ func (c *TargetPoolsSetBackupCall) Do(opts ...googleapi.CallOption) (*Operation, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36620,12 +38918,11 @@ func (r *TargetVpnGatewaysService) AggregatedList(project string) *TargetVpnGate // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -36682,22 +38979,21 @@ func (c *TargetVpnGatewaysAggregatedListCall) Context(ctx context.Context) *Targ } func (c *TargetVpnGatewaysAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetVpnGateways") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetVpnGateways.aggregatedList" call. @@ -36732,7 +39028,8 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36745,7 +39042,7 @@ func (c *TargetVpnGatewaysAggregatedListCall) Do(opts ...googleapi.CallOption) ( // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -36842,21 +39139,20 @@ func (c *TargetVpnGatewaysDeleteCall) Context(ctx context.Context) *TargetVpnGat } func (c *TargetVpnGatewaysDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetVpnGateway": c.targetVpnGateway, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetVpnGateways.delete" call. @@ -36891,7 +39187,8 @@ func (c *TargetVpnGatewaysDeleteCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -36988,24 +39285,23 @@ func (c *TargetVpnGatewaysGetCall) Context(ctx context.Context) *TargetVpnGatewa } func (c *TargetVpnGatewaysGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways/{targetVpnGateway}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "targetVpnGateway": c.targetVpnGateway, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetVpnGateways.get" call. 
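The other change repeated in every Do method is the decode step: instead of decoding straight into &ret, the generated code introduces an intermediate target variable. For struct responses the two forms behave identically; the indirection presumably lets the generator point target at something other than &ret for response schemas that are not plain structs, which is an assumption about the generator's intent rather than something this diff states. A small self-contained illustration of the pattern, with a hypothetical operation type standing in for the generated Operation struct:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// operation is a stand-in for the generated Operation response type.
type operation struct {
	Name string `json:"name"`
}

func main() {
	body := strings.NewReader(`{"name": "operation-123"}`)

	ret := &operation{}
	// Decode via an intermediate pointer, exactly as the refactored hunks do.
	// encoding/json follows the extra level of indirection, so the result is
	// the same as decoding into &ret directly.
	target := &ret
	if err := json.NewDecoder(body).Decode(target); err != nil {
		panic(err)
	}
	fmt.Println(ret.Name) // operation-123
}
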
@@ -37040,7 +39336,8 @@ func (c *TargetVpnGatewaysGetCall) Do(opts ...googleapi.CallOption) (*TargetVpnG HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37127,26 +39424,24 @@ func (c *TargetVpnGatewaysInsertCall) Context(ctx context.Context) *TargetVpnGat } func (c *TargetVpnGatewaysInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetvpngateway) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetVpnGateways.insert" call. @@ -37181,7 +39476,8 @@ func (c *TargetVpnGatewaysInsertCall) Do(opts ...googleapi.CallOption) (*Operati HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37262,12 +39558,11 @@ func (r *TargetVpnGatewaysService) List(project string, region string) *TargetVp // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -37324,23 +39619,22 @@ func (c *TargetVpnGatewaysListCall) Context(ctx context.Context) *TargetVpnGatew } func (c *TargetVpnGatewaysListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetVpnGateways") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.targetVpnGateways.list" call. @@ -37375,7 +39669,8 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37389,7 +39684,7 @@ func (c *TargetVpnGatewaysListCall) Do(opts ...googleapi.CallOption) (*TargetVpn // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -37492,20 +39787,19 @@ func (c *UrlMapsDeleteCall) Context(ctx context.Context) *UrlMapsDeleteCall { } func (c *UrlMapsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "urlMap": c.urlMap, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.delete" call. @@ -37540,7 +39834,8 @@ func (c *UrlMapsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37628,23 +39923,22 @@ func (c *UrlMapsGetCall) Context(ctx context.Context) *UrlMapsGetCall { } func (c *UrlMapsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "urlMap": c.urlMap, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.get" call. 
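The reworded doc comments and "filter" parameter descriptions above all describe the same list-call filter syntax: a field name, then eq or ne, then a literal that must match the entire field (RE2 for strings), with nested fields and multiple parenthesized AND-ed expressions available in the Beta API only. A hedged usage sketch, assuming the usual generated Filter option on list calls and the standard google.golang.org/api/compute/v1 import path (neither is introduced by this diff):

package sketch

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// listNamedPools shows the single-expression form of the filter syntax the
// rewritten descriptions document: exclude one resource by name.
func listNamedPools(svc *compute.Service, project, region string) {
	pools, err := svc.TargetPools.List(project, region).
		Filter("name ne example-instance").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pools.Items {
		fmt.Println(p.Name)
	}
	// Beta API only: nested fields, and multiple expressions AND-ed together
	// by wrapping each in parentheses, e.g.
	//   Filter("(scheduling.automaticRestart eq true) (zone eq us-central1-f)")
}
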
@@ -37679,7 +39973,8 @@ func (c *UrlMapsGetCall) Do(opts ...googleapi.CallOption) (*UrlMap, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37757,25 +40052,23 @@ func (c *UrlMapsInsertCall) Context(ctx context.Context) *UrlMapsInsertCall { } func (c *UrlMapsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.insert" call. @@ -37810,7 +40103,8 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -37845,6 +40139,140 @@ func (c *UrlMapsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, error) } +// method id "compute.urlMaps.invalidateCache": + +type UrlMapsInvalidateCacheCall struct { + s *Service + project string + urlMap string + cacheinvalidationrule *CacheInvalidationRule + urlParams_ gensupport.URLParams + ctx_ context.Context +} + +// InvalidateCache: Initiates a cache invalidation operation, +// invalidating the specified path, scoped to the specified UrlMap. +func (r *UrlMapsService) InvalidateCache(project string, urlMap string, cacheinvalidationrule *CacheInvalidationRule) *UrlMapsInvalidateCacheCall { + c := &UrlMapsInvalidateCacheCall{s: r.s, urlParams_: make(gensupport.URLParams)} + c.project = project + c.urlMap = urlMap + c.cacheinvalidationrule = cacheinvalidationrule + return c +} + +// Fields allows partial responses to be retrieved. See +// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse +// for more information. +func (c *UrlMapsInvalidateCacheCall) Fields(s ...googleapi.Field) *UrlMapsInvalidateCacheCall { + c.urlParams_.Set("fields", googleapi.CombineFields(s)) + return c +} + +// Context sets the context to be used in this call's Do method. Any +// pending HTTP request will be aborted if the provided context is +// canceled. 
+func (c *UrlMapsInvalidateCacheCall) Context(ctx context.Context) *UrlMapsInvalidateCacheCall { + c.ctx_ = ctx + return c +} + +func (c *UrlMapsInvalidateCacheCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + var body io.Reader = nil + body, err := googleapi.WithoutDataWrapper.JSONReader(c.cacheinvalidationrule) + if err != nil { + return nil, err + } + reqHeaders.Set("Content-Type", "application/json") + c.urlParams_.Set("alt", alt) + urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/invalidateCache") + urls += "?" + c.urlParams_.Encode() + req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders + googleapi.Expand(req.URL, map[string]string{ + "project": c.project, + "urlMap": c.urlMap, + }) + return gensupport.SendRequest(c.ctx_, c.s.client, req) +} + +// Do executes the "compute.urlMaps.invalidateCache" call. +// Exactly one of *Operation or error will be non-nil. Any non-2xx +// status code is an error. Response headers are in either +// *Operation.ServerResponse.Header or (if a response was returned at +// all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified +// to check whether the returned error was because +// http.StatusNotModified was returned. +func (c *UrlMapsInvalidateCacheCall) Do(opts ...googleapi.CallOption) (*Operation, error) { + gensupport.SetOptions(c.urlParams_, opts...) + res, err := c.doRequest("json") + if res != nil && res.StatusCode == http.StatusNotModified { + if res.Body != nil { + res.Body.Close() + } + return nil, &googleapi.Error{ + Code: res.StatusCode, + Header: res.Header, + } + } + if err != nil { + return nil, err + } + defer googleapi.CloseBody(res) + if err := googleapi.CheckResponse(res); err != nil { + return nil, err + } + ret := &Operation{ + ServerResponse: googleapi.ServerResponse{ + Header: res.Header, + HTTPStatusCode: res.StatusCode, + }, + } + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { + return nil, err + } + return ret, nil + // { + // "description": "Initiates a cache invalidation operation, invalidating the specified path, scoped to the specified UrlMap.", + // "httpMethod": "POST", + // "id": "compute.urlMaps.invalidateCache", + // "parameterOrder": [ + // "project", + // "urlMap" + // ], + // "parameters": { + // "project": { + // "description": "Project ID for this request.", + // "location": "path", + // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + // "required": true, + // "type": "string" + // }, + // "urlMap": { + // "description": "Name of the UrlMap scoping this request.", + // "location": "path", + // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + // "required": true, + // "type": "string" + // } + // }, + // "path": "{project}/global/urlMaps/{urlMap}/invalidateCache", + // "request": { + // "$ref": "CacheInvalidationRule" + // }, + // "response": { + // "$ref": "Operation" + // }, + // "scopes": [ + // "https://www.googleapis.com/auth/cloud-platform", + // "https://www.googleapis.com/auth/compute" + // ] + // } + +} + // method id "compute.urlMaps.list": type UrlMapsListCall struct { @@ -37882,12 +40310,11 @@ func (r *UrlMapsService) List(project string) *UrlMapsListCall { // example-instance, you would use filter=name ne // example-instance. 
// -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -37944,22 +40371,21 @@ func (c *UrlMapsListCall) Context(ctx context.Context) *UrlMapsListCall { } func (c *UrlMapsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.list" call. @@ -37994,7 +40420,8 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38007,7 +40434,7 @@ func (c *UrlMapsListCall) Do(opts ...googleapi.CallOption) (*UrlMapList, error) // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -38106,26 +40533,24 @@ func (c *UrlMapsPatchCall) Context(ctx context.Context) *UrlMapsPatchCall { } func (c *UrlMapsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "urlMap": c.urlMap, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.patch" call. 
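Beyond the mechanical refactors, the one genuinely new surface in this stretch is the compute.urlMaps.invalidateCache method added above, which POSTs a CacheInvalidationRule and returns an Operation. A brief usage sketch follows; the InvalidateCache call and its request/response types come from the diff itself, but the Path field on CacheInvalidationRule is an assumption, since the hunk does not show that struct's definition.

package sketch

import (
	"fmt"
	"log"

	compute "google.golang.org/api/compute/v1"
)

// invalidatePath sketches how the new UrlMaps.InvalidateCache call might be
// used to invalidate cached responses under a path, scoped to one UrlMap.
func invalidatePath(svc *compute.Service, project, urlMap string) {
	rule := &compute.CacheInvalidationRule{Path: "/images/*"} // Path field assumed

	op, err := svc.UrlMaps.InvalidateCache(project, urlMap, rule).Do()
	if err != nil {
		log.Fatal(err)
	}
	// Like the other mutating calls in this file, the result is an Operation
	// the caller can poll for completion.
	fmt.Println("started:", op.Name)
}
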
@@ -38160,7 +40585,8 @@ func (c *UrlMapsPatchCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38241,26 +40667,24 @@ func (c *UrlMapsUpdateCall) Context(ctx context.Context) *UrlMapsUpdateCall { } func (c *UrlMapsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "urlMap": c.urlMap, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.update" call. @@ -38295,7 +40719,8 @@ func (c *UrlMapsUpdateCall) Do(opts ...googleapi.CallOption) (*Operation, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38378,26 +40803,24 @@ func (c *UrlMapsValidateCall) Context(ctx context.Context) *UrlMapsValidateCall } func (c *UrlMapsValidateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "urlMap": c.urlMap, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.urlMaps.validate" call. @@ -38432,7 +40855,8 @@ func (c *UrlMapsValidateCall) Do(opts ...googleapi.CallOption) (*UrlMapsValidate HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38510,12 +40934,11 @@ func (r *VpnTunnelsService) AggregatedList(project string) *VpnTunnelsAggregated // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. 
For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -38572,22 +40995,21 @@ func (c *VpnTunnelsAggregatedListCall) Context(ctx context.Context) *VpnTunnelsA } func (c *VpnTunnelsAggregatedListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/vpnTunnels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.vpnTunnels.aggregatedList" call. @@ -38622,7 +41044,8 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38635,7 +41058,7 @@ func (c *VpnTunnelsAggregatedListCall) Do(opts ...googleapi.CallOption) (*VpnTun // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -38732,21 +41155,20 @@ func (c *VpnTunnelsDeleteCall) Context(ctx context.Context) *VpnTunnelsDeleteCal } func (c *VpnTunnelsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "vpnTunnel": c.vpnTunnel, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.vpnTunnels.delete" call. @@ -38781,7 +41203,8 @@ func (c *VpnTunnelsDeleteCall) Do(opts ...googleapi.CallOption) (*Operation, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -38878,24 +41301,23 @@ func (c *VpnTunnelsGetCall) Context(ctx context.Context) *VpnTunnelsGetCall { } func (c *VpnTunnelsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels/{vpnTunnel}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, "vpnTunnel": c.vpnTunnel, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.vpnTunnels.get" call. @@ -38930,7 +41352,8 @@ func (c *VpnTunnelsGetCall) Do(opts ...googleapi.CallOption) (*VpnTunnel, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -39017,26 +41440,24 @@ func (c *VpnTunnelsInsertCall) Context(ctx context.Context) *VpnTunnelsInsertCal } func (c *VpnTunnelsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.vpntunnel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.vpnTunnels.insert" call. @@ -39071,7 +41492,8 @@ func (c *VpnTunnelsInsertCall) Do(opts ...googleapi.CallOption) (*Operation, err HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -39152,12 +41574,11 @@ func (r *VpnTunnelsService) List(project string, region string) *VpnTunnelsListC // example-instance, you would use filter=name ne // example-instance. // -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. 
For example, @@ -39214,23 +41635,22 @@ func (c *VpnTunnelsListCall) Context(ctx context.Context) *VpnTunnelsListCall { } func (c *VpnTunnelsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/vpnTunnels") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "region": c.region, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.vpnTunnels.list" call. @@ -39265,7 +41685,8 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -39279,7 +41700,7 @@ func (c *VpnTunnelsListCall) Do(opts ...googleapi.CallOption) (*VpnTunnelList, e // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -39384,21 +41805,20 @@ func (c *ZoneOperationsDeleteCall) Context(ctx context.Context) *ZoneOperationsD } func (c *ZoneOperationsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.zoneOperations.delete" call. @@ -39503,24 +41923,23 @@ func (c *ZoneOperationsGetCall) Context(ctx context.Context) *ZoneOperationsGetC } func (c *ZoneOperationsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, "operation": c.operation, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.zoneOperations.get" call. @@ -39555,7 +41974,8 @@ func (c *ZoneOperationsGetCall) Do(opts ...googleapi.CallOption) (*Operation, er HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -39643,12 +42063,11 @@ func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperation // example-instance, you would use filter=name ne // example-instance. 
// -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -39705,23 +42124,22 @@ func (c *ZoneOperationsListCall) Context(ctx context.Context) *ZoneOperationsLis } func (c *ZoneOperationsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.zoneOperations.list" call. @@ -39756,7 +42174,8 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -39770,7 +42189,7 @@ func (c *ZoneOperationsListCall) Do(opts ...googleapi.CallOption) (*OperationLis // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, @@ -39885,23 +42304,22 @@ func (c *ZonesGetCall) Context(ctx context.Context) *ZonesGetCall { } func (c *ZonesGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, "zone": c.zone, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.zones.get" call. @@ -39936,7 +42354,8 @@ func (c *ZonesGetCall) Do(opts ...googleapi.CallOption) (*Zone, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -40014,12 +42433,11 @@ func (r *ZonesService) List(project string) *ZonesListCall { // example-instance, you would use filter=name ne // example-instance. 
// -// Compute Engine Beta API Only: If you use filtering in the Beta API, -// you can also filter on nested fields. For example, you could filter -// on instances that have set the scheduling.automaticRestart field to -// true. In particular, use filtering on nested fields to take advantage -// of instance labels to organize and filter results based on label -// values. +// Compute Engine Beta API Only: When filtering in the Beta API, you can +// also filter on nested fields. For example, you could filter on +// instances that have set the scheduling.automaticRestart field to +// true. Use filtering on nested fields to take advantage of labels to +// organize and search for results based on label values. // // The Beta API also supports filtering on multiple expressions by // providing each separate expression within parentheses. For example, @@ -40076,22 +42494,21 @@ func (c *ZonesListCall) Context(ctx context.Context) *ZonesListCall { } func (c *ZonesListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "project": c.project, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "compute.zones.list" call. @@ -40126,7 +42543,8 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -40139,7 +42557,7 @@ func (c *ZonesListCall) Do(opts ...googleapi.CallOption) (*ZoneList, error) { // ], // "parameters": { // "filter": { - // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: If you use filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
In particular, use filtering on nested fields to take advantage of instance labels to organize and filter results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + // "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", // "location": "query", // "type": "string" // }, diff --git a/vendor/google.golang.org/api/gensupport/buffer.go b/vendor/google.golang.org/api/gensupport/buffer.go index 4b8ec1424..992104911 100644 --- a/vendor/google.golang.org/api/gensupport/buffer.go +++ b/vendor/google.golang.org/api/gensupport/buffer.go @@ -11,8 +11,8 @@ import ( "google.golang.org/api/googleapi" ) -// ResumableBuffer buffers data from an io.Reader to support uploading media in retryable chunks. -type ResumableBuffer struct { +// MediaBuffer buffers data from an io.Reader to support uploading media in retryable chunks. +type MediaBuffer struct { media io.Reader chunk []byte // The current chunk which is pending upload. The capacity is the chunk size. @@ -22,42 +22,42 @@ type ResumableBuffer struct { off int64 } -func NewResumableBuffer(media io.Reader, chunkSize int) *ResumableBuffer { - return &ResumableBuffer{media: media, chunk: make([]byte, 0, chunkSize)} +func NewMediaBuffer(media io.Reader, chunkSize int) *MediaBuffer { + return &MediaBuffer{media: media, chunk: make([]byte, 0, chunkSize)} } // Chunk returns the current buffered chunk, the offset in the underlying media // from which the chunk is drawn, and the size of the chunk. // Successive calls to Chunk return the same chunk between calls to Next. -func (rb *ResumableBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { +func (mb *MediaBuffer) Chunk() (chunk io.Reader, off int64, size int, err error) { // There may already be data in chunk if Next has not been called since the previous call to Chunk. 
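The filter grammar documented in the compute descriptions above (field_name comparison_string literal_string, with eq or ne comparisons, RE2 string literals, and parenthesized expressions combined as AND in the Beta API) is reached from the generated client through each list call's Filter parameter. A minimal sketch, assuming the compute/v1 client from this vendor tree, an authorized HTTP client, and a hypothetical project and zone:

package main

import (
	"fmt"
	"log"
	"net/http"

	compute "google.golang.org/api/compute/v1"
)

func main() {
	// An OAuth2-authorized *http.Client is assumed here; the default
	// client stands in only to keep the sketch self-contained.
	client := http.DefaultClient

	svc, err := compute.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// filter=name ne example-instance, as in the description above.
	ops, err := svc.ZoneOperations.List("my-project", "us-central1-f").
		Filter("name ne example-instance").
		Do()
	if err != nil {
		log.Fatal(err)
	}
	for _, op := range ops.Items {
		fmt.Println(op.Name, op.Status)
	}
}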
- if rb.err == nil && len(rb.chunk) == 0 { - rb.err = rb.loadChunk() + if mb.err == nil && len(mb.chunk) == 0 { + mb.err = mb.loadChunk() } - return bytes.NewReader(rb.chunk), rb.off, len(rb.chunk), rb.err + return bytes.NewReader(mb.chunk), mb.off, len(mb.chunk), mb.err } // loadChunk will read from media into chunk, up to the capacity of chunk. -func (rb *ResumableBuffer) loadChunk() error { - bufSize := cap(rb.chunk) - rb.chunk = rb.chunk[:bufSize] +func (mb *MediaBuffer) loadChunk() error { + bufSize := cap(mb.chunk) + mb.chunk = mb.chunk[:bufSize] read := 0 var err error for err == nil && read < bufSize { var n int - n, err = rb.media.Read(rb.chunk[read:]) + n, err = mb.media.Read(mb.chunk[read:]) read += n } - rb.chunk = rb.chunk[:read] + mb.chunk = mb.chunk[:read] return err } // Next advances to the next chunk, which will be returned by the next call to Chunk. // Calls to Next without a corresponding prior call to Chunk will have no effect. -func (rb *ResumableBuffer) Next() { - rb.off += int64(len(rb.chunk)) - rb.chunk = rb.chunk[0:0] +func (mb *MediaBuffer) Next() { + mb.off += int64(len(mb.chunk)) + mb.chunk = mb.chunk[0:0] } type readerTyper struct { diff --git a/vendor/google.golang.org/api/gensupport/media.go b/vendor/google.golang.org/api/gensupport/media.go index 817f46f5d..c6410e89a 100644 --- a/vendor/google.golang.org/api/gensupport/media.go +++ b/vendor/google.golang.org/api/gensupport/media.go @@ -176,25 +176,24 @@ func typeHeader(contentType string) textproto.MIMEHeader { // chunkSize is the size of the chunk that media should be split into. // If chunkSize is non-zero and the contents of media do not fit in a single // chunk (or there is an error reading media), then media will be returned as a -// ResumableBuffer. Otherwise, media will be returned as a Reader. +// MediaBuffer. Otherwise, media will be returned as a Reader. // // After PrepareUpload has been called, media should no longer be used: the // media content should be accessed via one of the return values. -func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, - *ResumableBuffer) { +func PrepareUpload(media io.Reader, chunkSize int) (io.Reader, *MediaBuffer) { if chunkSize == 0 { // do not chunk return media, nil } - rb := NewResumableBuffer(media, chunkSize) - rdr, _, _, err := rb.Chunk() + mb := NewMediaBuffer(media, chunkSize) + rdr, _, _, err := mb.Chunk() if err == io.EOF { // we can upload this in a single request return rdr, nil } - // err might be a non-EOF error. If it is, the next call to rb.Chunk will - // return the same error. Returning a ResumableBuffer ensures that this error + // err might be a non-EOF error. If it is, the next call to mb.Chunk will + // return the same error. Returning a MediaBuffer ensures that this error // will be handled at some point. - return nil, rb + return nil, mb } diff --git a/vendor/google.golang.org/api/gensupport/resumable.go b/vendor/google.golang.org/api/gensupport/resumable.go index b3e774aa4..fb133a841 100644 --- a/vendor/google.golang.org/api/gensupport/resumable.go +++ b/vendor/google.golang.org/api/gensupport/resumable.go @@ -35,7 +35,7 @@ type ResumableUpload struct { URI string UserAgent string // User-Agent for header of the request // Media is the object being uploaded. - Media *ResumableBuffer + Media *MediaBuffer // MediaType defines the media type, e.g. "image/jpeg". 
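The MediaBuffer introduced above (renamed from ResumableBuffer) is what drives chunked, retryable uploads: Chunk keeps returning the same buffered chunk until Next is called, and the final chunk arrives together with io.EOF. A rough sketch of that contract, assuming gensupport stays importable as in this vendor tree and with uploadChunk standing in for whatever actually sends one chunk:

package main

import (
	"io"
	"log"
	"os"

	"google.golang.org/api/gensupport"
)

// uploadChunk is a hypothetical placeholder for the real per-chunk request.
func uploadChunk(chunk io.Reader, off int64, size int) error {
	log.Printf("uploading %d bytes at offset %d", size, off)
	return nil
}

func main() {
	f, err := os.Open("media.bin")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	mb := gensupport.NewMediaBuffer(f, 256*1024) // 256 KiB chunks
	for {
		chunk, off, size, cerr := mb.Chunk()
		if size > 0 {
			if uerr := uploadChunk(chunk, off, size); uerr != nil {
				log.Fatal(uerr)
			}
		}
		if cerr == io.EOF {
			break // the last (possibly empty) chunk has been delivered
		}
		if cerr != nil {
			log.Fatal(cerr)
		}
		mb.Next() // advance; the next Chunk call loads fresh data
	}
}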
MediaType string @@ -80,7 +80,10 @@ func (rx *ResumableUpload) doUploadRequest(ctx context.Context, data io.Reader, req.Header.Set("Content-Range", contentRange) req.Header.Set("Content-Type", rx.MediaType) req.Header.Set("User-Agent", rx.UserAgent) - return ctxhttp.Do(ctx, rx.Client, req) + fn := Hook(ctx, req) + resp, err := ctxhttp.Do(ctx, rx.Client, req) + fn(resp) + return resp, err } @@ -135,6 +138,8 @@ func contextDone(ctx context.Context) bool { // It retries using the provided back off strategy until cancelled or the // strategy indicates to stop retrying. // It is called from the auto-generated API code and is not visible to the user. +// Before sending an HTTP request, Upload calls Hook to obtain a function which +// it subsequently calls with the HTTP response. // rx is private to the auto-generated API code. // Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close. func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) { diff --git a/vendor/google.golang.org/api/gensupport/send.go b/vendor/google.golang.org/api/gensupport/send.go new file mode 100644 index 000000000..5b2727d72 --- /dev/null +++ b/vendor/google.golang.org/api/gensupport/send.go @@ -0,0 +1,35 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gensupport + +import ( + "net/http" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +// Hook is a function that is called once before each HTTP request that is sent +// by a generated API. It returns a function that is called after the request +// returns. +// Hook is never called if the context is nil. +var Hook func(ctx context.Context, req *http.Request) func(resp *http.Response) = defaultHook + +func defaultHook(ctx context.Context, req *http.Request) func(resp *http.Response) { + return func(resp *http.Response) {} +} + +// SendRequest sends a single HTTP request using the given client. +// If ctx is non-nil, uses ctxhttp.Do, and calls Hook beforehand. The function +// returned by Hook is called after the request returns. +func SendRequest(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if ctx != nil { + fn := Hook(ctx, req) + resp, err := ctxhttp.Do(ctx, client, req) + fn(resp) + return resp, err + } + return client.Do(req) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 3768b4687..52811fdbb 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"bRFOOrZKfO9LweMbPqu0kcu6De8/KVPQfwGxQTBtH0g1kuij0C9i4uc\"", + "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/cPnwg2U9hg8m8Y6wHWcvqIF8qSM\"", "discoveryVersion": "v1", "id": "storage:v1", "name": "storage", "version": "v1", - "revision": "20160304", + "revision": "20160609", "title": "Cloud Storage JSON API", "description": "Stores and retrieves potentially large, immutable data objects.", "ownerDomain": "google.com", @@ -294,15 +294,15 @@ }, "website": { "type": "object", - "description": "The bucket's website configuration.", + "description": "The bucket's website configuration, controlling how the service behaves when accessing bucket contents as a web site. 
See the Static Website Examples for more information.", "properties": { "mainPageSuffix": { "type": "string", - "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories." + "description": "If the requested object path is missing, the service will ensure the path has a trailing '/', append this suffix, and attempt to retrieve the resulting object. This allows the creation of index.html objects to represent directory pages." }, "notFoundPage": { "type": "string", - "description": "The custom object to return when a requested resource is not found." + "description": "If the requested object path is missing, and any mainPageSuffix object is missing, if applicable, the service will return the named object from this bucket as the content for a 404 Not Found result." } } } @@ -574,7 +574,7 @@ }, "contentType": { "type": "string", - "description": "Content-Type of the object data." + "description": "Content-Type of the object data. If contentType is not specified, object downloads will be served as application/octet-stream." }, "crc32c": { "type": "string", @@ -1088,7 +1088,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit acl and defaultObjectAcl properties." + "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" } @@ -1168,7 +1168,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit acl and defaultObjectAcl properties." + "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" } @@ -1226,7 +1226,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit acl and defaultObjectAcl properties." + "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" } @@ -1318,7 +1318,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit acl and defaultObjectAcl properties." + "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" } @@ -1334,8 +1334,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ] }, "update": { @@ -1411,7 +1410,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit acl and defaultObjectAcl properties." + "Omit owner, acl and defaultObjectAcl properties." ], "location": "query" } @@ -1427,8 +1426,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ] } } @@ -2076,7 +2074,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" }, @@ -2235,7 +2233,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" } @@ -2333,7 +2331,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" } @@ -2414,7 +2412,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" }, @@ -2517,7 +2515,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." 
], "location": "query" } @@ -2534,8 +2532,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ] }, "rewrite": { @@ -2640,7 +2637,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" }, @@ -2764,7 +2761,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" } @@ -2781,8 +2778,7 @@ }, "scopes": [ "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/devstorage.full_control", - "https://www.googleapis.com/auth/devstorage.read_write" + "https://www.googleapis.com/auth/devstorage.full_control" ], "supportsMediaDownload": true, "useMediaDownloadService": true @@ -2830,7 +2826,7 @@ ], "enumDescriptions": [ "Include all properties.", - "Omit the acl property." + "Omit the owner, acl property." ], "location": "query" }, diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index a29904439..2090a20dc 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -7,7 +7,7 @@ // import "google.golang.org/api/storage/v1" // ... // storageService, err := storage.New(oauthHttpClient) -package storage +package storage // import "google.golang.org/api/storage/v1" import ( "bytes" @@ -225,7 +225,9 @@ type Bucket struct { // Versioning: The bucket's versioning configuration. Versioning *BucketVersioning `json:"versioning,omitempty"` - // Website: The bucket's website configuration. + // Website: The bucket's website configuration, controlling how the + // service behaves when accessing bucket contents as a web site. See the + // Static Website Examples for more information. Website *BucketWebsite `json:"website,omitempty"` // ServerResponse contains the HTTP response code and headers from the @@ -454,14 +456,20 @@ func (s *BucketVersioning) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields) } -// BucketWebsite: The bucket's website configuration. +// BucketWebsite: The bucket's website configuration, controlling how +// the service behaves when accessing bucket contents as a web site. See +// the Static Website Examples for more information. type BucketWebsite struct { - // MainPageSuffix: Behaves as the bucket's directory index where missing - // objects are treated as potential directories. + // MainPageSuffix: If the requested object path is missing, the service + // will ensure the path has a trailing '/', append this suffix, and + // attempt to retrieve the resulting object. This allows the creation of + // index.html objects to represent directory pages. MainPageSuffix string `json:"mainPageSuffix,omitempty"` - // NotFoundPage: The custom object to return when a requested resource - // is not found. + // NotFoundPage: If the requested object path is missing, and any + // mainPageSuffix object is missing, if applicable, the service will + // return the named object from this bucket as the content for a 404 Not + // Found result. NotFoundPage string `json:"notFoundPage,omitempty"` // ForceSendFields is a list of field names (e.g. 
"MainPageSuffix") to @@ -794,7 +802,9 @@ type Object struct { // ContentLanguage: Content-Language of the object data. ContentLanguage string `json:"contentLanguage,omitempty"` - // ContentType: Content-Type of the object data. + // ContentType: Content-Type of the object data. If contentType is not + // specified, object downloads will be served as + // application/octet-stream. ContentType string `json:"contentType,omitempty"` // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; @@ -1181,20 +1191,19 @@ func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.delete" call. @@ -1287,23 +1296,22 @@ func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccess } func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.get" call. @@ -1338,7 +1346,8 @@ func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketA HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1411,25 +1420,23 @@ func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.insert" call. @@ -1464,7 +1471,8 @@ func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Buck HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1542,22 +1550,21 @@ func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAcces } func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.list" call. @@ -1592,7 +1599,8 @@ func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Bucket HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1661,26 +1669,24 @@ func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAcce } func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.patch" call. 
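All of the rewritten doRequest methods above now funnel through gensupport.SendRequest, which consults the package-level Hook before a context-aware request and calls the returned function with the response; requests issued without a context bypass the hook entirely. One way to use that seam, sketched on the assumption that gensupport remains importable as in this vendor tree, is simple request and latency logging, installed once at startup:

package main

import (
	"log"
	"net/http"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/api/gensupport"
)

func main() {
	gensupport.Hook = func(ctx context.Context, req *http.Request) func(*http.Response) {
		start := time.Now()
		return func(resp *http.Response) {
			status := "no response"
			if resp != nil {
				status = resp.Status
			}
			log.Printf("%s %s -> %s in %v", req.Method, req.URL.Path, status, time.Since(start))
		}
	}
	// Any generated call that carries a context (via Context(ctx)) is now
	// logged; calls made without a context skip the hook, per SendRequest.
}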
@@ -1715,7 +1721,8 @@ func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Bucke HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1793,26 +1800,24 @@ func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAcc } func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.bucketAccessControls.update" call. @@ -1847,7 +1852,8 @@ func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Buck HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -1937,19 +1943,18 @@ func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall { } func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.delete" call. @@ -2041,7 +2046,7 @@ func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall { c.urlParams_.Set("projection", projection) return c @@ -2074,22 +2079,21 @@ func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall { } func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.get" call. @@ -2124,7 +2128,8 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2162,7 +2167,7 @@ func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." + // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" @@ -2246,7 +2251,7 @@ func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -2269,23 +2274,21 @@ func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall { } func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.insert" call. @@ -2320,7 +2323,8 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2386,7 +2390,7 @@ func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." + // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" @@ -2451,7 +2455,7 @@ func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall { // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. 
func (c *BucketsListCall) Projection(projection string) *BucketsListCall { c.urlParams_.Set("projection", projection) return c @@ -2484,20 +2488,19 @@ func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall { } func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.SetOpaque(req.URL) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.list" call. @@ -2532,7 +2535,8 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2575,7 +2579,7 @@ func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." + // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" @@ -2697,7 +2701,7 @@ func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -2720,25 +2724,23 @@ func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall { } func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.patch" call. 
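The storage hunks above expand the website configuration docs: when a requested object is missing, the service ensures the path ends in '/', appends mainPageSuffix and retries, and notFoundPage supplies the body for 404 responses. A hedged sketch of setting both through the patch call this hunk touches, assuming the storage/v1 client from this vendor tree, an authorized HTTP client, and a hypothetical bucket name:

package main

import (
	"log"
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	client := http.DefaultClient // placeholder; an authorized client is assumed

	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// Patch only the website configuration; other bucket fields are untouched.
	bucket, err := svc.Buckets.Patch("my-static-site-bucket", &storage.Bucket{
		Website: &storage.BucketWebsite{
			MainPageSuffix: "index.html", // served for slash-terminated paths
			NotFoundPage:   "404.html",   // served when no matching object exists
		},
	}).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("bucket %s updated", bucket.Name)
}

Note that the same hunk narrows the accepted scopes for buckets.patch to devstorage.full_control (plus cloud-platform), so the authorizing token needs one of those.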
@@ -2773,7 +2775,8 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -2851,7 +2854,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." + // "Omit owner, acl and defaultObjectAcl properties." // ], // "location": "query", // "type": "string" @@ -2866,8 +2869,7 @@ func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -2953,7 +2955,7 @@ func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAc // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit acl and defaultObjectAcl properties. +// "noAcl" - Omit owner, acl and defaultObjectAcl properties. func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -2976,25 +2978,23 @@ func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall { } func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.buckets.update" call. @@ -3029,7 +3029,8 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3107,7 +3108,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit acl and defaultObjectAcl properties." + // "Omit owner, acl and defaultObjectAcl properties." 
// ], // "location": "query", // "type": "string" @@ -3122,8 +3123,7 @@ func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) { // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -3162,23 +3162,21 @@ func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall { } func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.SetOpaque(req.URL) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.channels.stop" call. @@ -3249,20 +3247,19 @@ func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.delete" call. @@ -3355,23 +3352,22 @@ func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *Defau } func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.get" call. 
@@ -3406,7 +3402,8 @@ func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (* HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3480,25 +3477,23 @@ func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.insert" call. @@ -3533,7 +3528,8 @@ func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3628,22 +3624,21 @@ func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *Defa } func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.list" call. 
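The defaultObjectAccessControls hunks above change only the request plumbing, but the resource they manage is what new objects in a bucket inherit as their initial ACL. A hedged sketch of adding an inheritable read grant, with the same assumed client and a hypothetical bucket:

package main

import (
	"log"
	"net/http"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	client := http.DefaultClient // placeholder; an authorized client is assumed

	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}

	// New objects written to the bucket will inherit this READER grant.
	acl, err := svc.DefaultObjectAccessControls.Insert("my-static-site-bucket",
		&storage.ObjectAccessControl{
			Entity: "allUsers",
			Role:   "READER",
		}).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("default ACL entry %s created", acl.Id)
}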
@@ -3678,7 +3673,8 @@ func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) ( HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3759,26 +3755,24 @@ func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *Def } func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.patch" call. @@ -3813,7 +3807,8 @@ func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -3891,26 +3886,24 @@ func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *De } func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.defaultObjectAccessControls.update" call. 
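The GET-style doRequest methods above keep forwarding c.ifNoneMatch_ as an If-None-Match header, now through the shared reqHeaders map. On the caller side that corresponds to the generated IfNoneMatch option; a sketch of a conditional fetch, assuming googleapi.IsNotModified behaves as in this vendor tree and using a hypothetical cached ETag:

package main

import (
	"log"
	"net/http"

	"google.golang.org/api/googleapi"
	storage "google.golang.org/api/storage/v1"
)

func main() {
	client := http.DefaultClient // placeholder; an authorized client is assumed

	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}

	cachedEtag := `"previously-seen-etag"` // hypothetical value from an earlier response

	bucket, err := svc.Buckets.Get("my-static-site-bucket").
		IfNoneMatch(cachedEtag).
		Do()
	switch {
	case googleapi.IsNotModified(err):
		log.Println("bucket unchanged; the cached copy is still valid")
	case err != nil:
		log.Fatal(err)
	default:
		log.Printf("bucket changed, new etag %s", bucket.Etag)
	}
}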
@@ -3945,7 +3938,8 @@ func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4032,21 +4026,20 @@ func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.delete" call. @@ -4162,24 +4155,23 @@ func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccess } func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.get" call. @@ -4214,7 +4206,8 @@ func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectA HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4310,26 +4303,24 @@ func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.insert" call. @@ -4364,7 +4355,8 @@ func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*Obje HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4465,23 +4457,22 @@ func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAcces } func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.list" call. @@ -4516,7 +4507,8 @@ func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*Object HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4608,27 +4600,25 @@ func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAcce } func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.patch" call. 
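An earlier hunk in this patch notes that object downloads are served as application/octet-stream when contentType is left unset. Setting it explicitly at upload time avoids that default; a sketch under the same client assumptions, with hypothetical bucket and object names:

package main

import (
	"log"
	"net/http"
	"os"

	storage "google.golang.org/api/storage/v1"
)

func main() {
	client := http.DefaultClient // placeholder; an authorized client is assumed

	svc, err := storage.New(client)
	if err != nil {
		log.Fatal(err)
	}

	f, err := os.Open("report.csv")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Without ContentType, downloads of this object would default to
	// application/octet-stream.
	obj, err := svc.Objects.Insert("my-static-site-bucket", &storage.Object{
		Name:        "reports/report.csv",
		ContentType: "text/csv",
	}).Media(f).Do()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("uploaded %s (%s)", obj.Name, obj.ContentType)
}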
@@ -4663,7 +4653,8 @@ func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*Objec HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4764,27 +4755,25 @@ func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAcc } func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, "entity": c.entity, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objectAccessControls.update" call. @@ -4819,7 +4808,8 @@ func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*Obje HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -4948,26 +4938,24 @@ func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall { } func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5018,7 +5006,8 @@ func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5217,7 +5206,7 @@ func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationN // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall { c.urlParams_.Set("projection", projection) return c @@ -5248,28 +5237,26 @@ func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall { } func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5320,7 +5307,8 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5424,7 +5412,7 @@ func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -5544,20 +5532,19 @@ func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall { } func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("DELETE", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.delete" call. @@ -5699,7 +5686,7 @@ func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64 // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall { c.urlParams_.Set("projection", projection) return c @@ -5732,23 +5719,22 @@ func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall { } func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -5799,7 +5785,8 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -5862,7 +5849,7 @@ func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -5893,7 +5880,7 @@ type ObjectsInsertCall struct { object *Object urlParams_ gensupport.URLParams media_ io.Reader - resumableBuffer_ *gensupport.ResumableBuffer + mediaBuffer_ *gensupport.MediaBuffer mediaType_ string mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_. progressUpdater_ googleapi.ProgressUpdater @@ -5987,7 +5974,7 @@ func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCa // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall { c.urlParams_.Set("projection", projection) return c @@ -6007,7 +5994,7 @@ func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) if !opts.ForceEmptyContentType { r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType) } - c.media_, c.resumableBuffer_ = gensupport.PrepareUpload(r, chunkSize) + c.media_, c.mediaBuffer_ = gensupport.PrepareUpload(r, chunkSize) return c } @@ -6024,7 +6011,7 @@ func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, s c.ctx_ = ctx rdr := gensupport.ReaderAtToReader(r, size) rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType) - c.resumableBuffer_ = gensupport.NewResumableBuffer(rdr, googleapi.DefaultUploadChunkSize) + c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize) c.media_ = nil c.mediaSize_ = size return c @@ -6058,42 +6045,44 @@ func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall { } func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") - if c.media_ != nil || c.resumableBuffer_ != nil { + if c.media_ != nil || c.mediaBuffer_ != nil { urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1) protocol := "multipart" - if c.resumableBuffer_ != nil { + if c.mediaBuffer_ != nil { protocol = "resumable" } c.urlParams_.Set("uploadType", protocol) } - urls += "?" + c.urlParams_.Encode() + if body == nil { + body = new(bytes.Buffer) + reqHeaders.Set("Content-Type", "application/json") + } if c.media_ != nil { - var combined io.ReadCloser - combined, ctype = gensupport.CombineBodyMedia(body, ctype, c.media_, c.mediaType_) + combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_) defer combined.Close() + reqHeaders.Set("Content-Type", ctype) body = combined } + if c.mediaBuffer_ != nil && c.mediaType_ != "" { + reqHeaders.Set("X-Upload-Content-Type", c.mediaType_) + } + urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - if c.resumableBuffer_ != nil && c.mediaType_ != "" { - req.Header.Set("X-Upload-Content-Type", c.mediaType_) - } - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.insert" call. @@ -6105,9 +6094,7 @@ func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { // was returned. func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { gensupport.SetOptions(c.urlParams_, opts...) 
- res, err := gensupport.Retry(c.ctx_, func() (*http.Response, error) { - return c.doRequest("json") - }, gensupport.DefaultBackoffStrategy()) + res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() @@ -6124,13 +6111,13 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { if err := googleapi.CheckResponse(res); err != nil { return nil, err } - if c.resumableBuffer_ != nil { + if c.mediaBuffer_ != nil { loc := res.Header.Get("Location") rx := &gensupport.ResumableUpload{ Client: c.s.client, UserAgent: c.s.userAgent(), URI: loc, - Media: c.resumableBuffer_, + Media: c.mediaBuffer_, MediaType: c.mediaType_, Callback: func(curr int64) { if c.progressUpdater_ != nil { @@ -6157,7 +6144,8 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6253,7 +6241,7 @@ func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -6335,7 +6323,7 @@ func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall { // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall { c.urlParams_.Set("projection", projection) return c @@ -6376,22 +6364,21 @@ func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall { } func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) + if c.ifNoneMatch_ != "" { + reqHeaders.Set("If-None-Match", c.ifNoneMatch_) + } var body io.Reader = nil c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("GET", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ifNoneMatch_ != "" { - req.Header.Set("If-None-Match", c.ifNoneMatch_) - } - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.list" call. @@ -6426,7 +6413,8 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6474,7 +6462,7 @@ func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -6609,7 +6597,7 @@ func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. 
func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall { c.urlParams_.Set("projection", projection) return c @@ -6632,26 +6620,24 @@ func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall { } func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PATCH", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.patch" call. @@ -6686,7 +6672,8 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -6770,7 +6757,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -6785,8 +6772,7 @@ func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) { // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ] // } @@ -6928,7 +6914,7 @@ func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall i // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall { c.urlParams_.Set("projection", projection) return c @@ -6970,28 +6956,26 @@ func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall { } func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}") urls += "?" 
+ c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "sourceBucket": c.sourceBucket, "sourceObject": c.sourceObject, "destinationBucket": c.destinationBucket, "destinationObject": c.destinationObject, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.rewrite" call. @@ -7026,7 +7010,8 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -7136,7 +7121,7 @@ func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." // ], // "location": "query", // "type": "string" @@ -7267,7 +7252,7 @@ func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCa // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall { c.urlParams_.Set("projection", projection) return c @@ -7290,26 +7275,24 @@ func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall { } func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("PUT", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, "object": c.object, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Download fetches the API endpoint's "media" value, instead of the normal @@ -7360,7 +7343,8 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -7444,7 +7428,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." 
// ], // "location": "query", // "type": "string" @@ -7459,8 +7443,7 @@ func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) { // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", - // "https://www.googleapis.com/auth/devstorage.full_control", - // "https://www.googleapis.com/auth/devstorage.read_write" + // "https://www.googleapis.com/auth/devstorage.full_control" // ], // "supportsMediaDownload": true, // "useMediaDownloadService": true @@ -7526,7 +7509,7 @@ func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall { // // Possible values: // "full" - Include all properties. -// "noAcl" - Omit the acl property. +// "noAcl" - Omit the owner, acl property. func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall { c.urlParams_.Set("projection", projection) return c @@ -7557,25 +7540,23 @@ func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall } func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { + reqHeaders := make(http.Header) + reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel) if err != nil { return nil, err } - ctype := "application/json" + reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch") urls += "?" + c.urlParams_.Encode() req, _ := http.NewRequest("POST", urls, body) + req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "bucket": c.bucket, }) - req.Header.Set("Content-Type", ctype) - req.Header.Set("User-Agent", c.s.userAgent()) - if c.ctx_ != nil { - return ctxhttp.Do(c.ctx_, c.s.client, req) - } - return c.s.client.Do(req) + return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "storage.objects.watchAll" call. @@ -7610,7 +7591,8 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) HTTPStatusCode: res.StatusCode, }, } - if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { + target := &ret + if err := json.NewDecoder(res.Body).Decode(target); err != nil { return nil, err } return ret, nil @@ -7658,7 +7640,7 @@ func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) // ], // "enumDescriptions": [ // "Include all properties.", - // "Omit the acl property." + // "Omit the owner, acl property." 
// ], // "location": "query", // "type": "string" diff --git a/vendor/vendor.json b/vendor/vendor.json index db04e37e7..f943a74e2 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1919,8 +1919,10 @@ "revision": "5eaf0df67e70d6997a9fe0ed24383fa1b01638d3" }, { + "checksumSHA1": "SjcL6w27LsP7xLQe9V068FO3qWI=", "path": "google.golang.org/api/compute/v1", - "revision": "43c645d4bcf9251ced36c823a93b6d198764aae4" + "revision": "fa0566afd4c8fdae644725fdf9b57b5851a20742", + "revisionTime": "2016-07-18T05:58:24Z" }, { "path": "google.golang.org/api/container/v1", @@ -1931,8 +1933,10 @@ "revision": "43c645d4bcf9251ced36c823a93b6d198764aae4" }, { + "checksumSHA1": "SLzHstPylt3EcBt9yEBJV+JqGp4=", "path": "google.golang.org/api/gensupport", - "revision": "43c645d4bcf9251ced36c823a93b6d198764aae4" + "revision": "fa0566afd4c8fdae644725fdf9b57b5851a20742", + "revisionTime": "2016-07-18T05:58:24Z" }, { "path": "google.golang.org/api/googleapi", @@ -1951,8 +1955,10 @@ "revision": "43c645d4bcf9251ced36c823a93b6d198764aae4" }, { + "checksumSHA1": "xIEDa8ZDicVplvLtQUHc9eVZays=", "path": "google.golang.org/api/storage/v1", - "revision": "43c645d4bcf9251ced36c823a93b6d198764aae4" + "revision": "fa0566afd4c8fdae644725fdf9b57b5851a20742", + "revisionTime": "2016-07-18T05:58:24Z" }, { "path": "google.golang.org/appengine", From a8e8922ea84c5496dfce255b20e6678d07f55d4b Mon Sep 17 00:00:00 2001 From: Greg Aker Date: Thu, 28 Jul 2016 15:38:09 -0500 Subject: [PATCH 0471/1238] Add enable_cdn to google_compute_backend_service. Add the ability to add/remove the Cloud CDN configuration option on a backend service. --- .../google/resource_compute_backend_service.go | 14 ++++++++++++++ .../google/r/compute_backend_service.html.markdown | 3 +++ 2 files changed, 17 insertions(+) diff --git a/builtin/providers/google/resource_compute_backend_service.go b/builtin/providers/google/resource_compute_backend_service.go index 94bc23439..bef3e5b60 100644 --- a/builtin/providers/google/resource_compute_backend_service.go +++ b/builtin/providers/google/resource_compute_backend_service.go @@ -88,6 +88,11 @@ func resourceComputeBackendService() *schema.Resource { Optional: true, }, + "enable_cdn": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "fingerprint": &schema.Schema{ Type: schema.TypeString, Computed: true, @@ -165,6 +170,10 @@ func resourceComputeBackendServiceCreate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(v.(int)) } + if v, ok := d.GetOk("enable_cdn"); ok { + service.EnableCDN = v.(bool) + } + project, err := getProject(d, config) if err != nil { return err @@ -212,6 +221,7 @@ func resourceComputeBackendServiceRead(d *schema.ResourceData, meta interface{}) } d.Set("description", service.Description) + d.Set("enable_cdn", service.EnableCDN) d.Set("port_name", service.PortName) d.Set("protocol", service.Protocol) d.Set("timeout_sec", service.TimeoutSec) @@ -260,6 +270,10 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ service.TimeoutSec = int64(d.Get("timeout_sec").(int)) } + if d.HasChange("enable_cdn") { + service.EnableCDN = d.Get("enable_cdn").(bool) + } + log.Printf("[DEBUG] Updating existing Backend Service %q: %#v", d.Id(), service) op, err := config.clientCompute.BackendServices.Update( project, d.Id(), &service).Do() diff --git a/website/source/docs/providers/google/r/compute_backend_service.html.markdown b/website/source/docs/providers/google/r/compute_backend_service.html.markdown index 9bfbe3bdc..4b578fdc7 100644 --- 
a/website/source/docs/providers/google/r/compute_backend_service.html.markdown +++ b/website/source/docs/providers/google/r/compute_backend_service.html.markdown @@ -19,6 +19,7 @@ resource "google_compute_backend_service" "foobar" { port_name = "http" protocol = "HTTP" timeout_sec = 10 + enable_cdn = false backend { group = "${google_compute_instance_group_manager.foo.instance_group}" @@ -74,6 +75,8 @@ The following arguments are supported: * `description` - (Optional) The textual description for the backend service. +* `enable_cdn` - (Optional) Whether or not to enable the Cloud CDN on the backend service. + * `port_name` - (Optional) The name of a service that has been added to an instance group in this backend. See [related docs](https://cloud.google.com/compute/docs/instance-groups/#specifying_service_endpoints) for details. Defaults to http. From d50aeeef0d85d7f1d094fc01d943bd438f2398fe Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 11 Jul 2016 17:37:51 -0500 Subject: [PATCH 0472/1238] website: Docs sweep for lists & maps --- .../source/docs/commands/apply.html.markdown | 14 +- .../source/docs/commands/output.html.markdown | 10 +- .../source/docs/commands/plan.html.markdown | 12 +- .../source/docs/commands/push.html.markdown | 2 +- .../docs/commands/refresh.html.markdown | 14 +- .../environment-variables.html.md | 2 + .../docs/configuration/interpolation.html.md | 15 +- .../source/docs/configuration/modules.html.md | 8 +- .../source/docs/configuration/outputs.html.md | 18 +-- .../docs/configuration/resources.html.md | 3 +- .../source/docs/configuration/syntax.html.md | 57 ++++---- .../docs/configuration/variables.html.md | 136 ++++++++++++------ .../source/docs/internals/debugging.html.md | 4 +- .../source/docs/modules/usage.html.markdown | 44 ++---- .../docker/r/container.html.markdown | 2 +- .../intro/getting-started/variables.html.md | 79 +++++----- 16 files changed, 239 insertions(+), 181 deletions(-) diff --git a/website/source/docs/commands/apply.html.markdown b/website/source/docs/commands/apply.html.markdown index 6f9a35e2f..67626df2b 100644 --- a/website/source/docs/commands/apply.html.markdown +++ b/website/source/docs/commands/apply.html.markdown @@ -52,11 +52,13 @@ The command-line flags are all optional. The list of available flags are: be limited to this resource and its dependencies. This flag can be used multiple times. -* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This - flag can be set multiple times. +* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag + can be set multiple times. Variable values are interpreted as + [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be + specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be automatically - loaded first. Any files specified by `-var-file` override any values - in a "terraform.tfvars". This flag can be used multiple times. - + a [variable file](/docs/configuration/variables.html#variable-files). If + "terraform.tfvars" is present, it will be automatically loaded first. Any + files specified by `-var-file` override any values in a "terraform.tfvars". + This flag can be used multiple times. 
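As an illustration of the HCL-interpreted `-var` values documented in the apply hunk above, here is a minimal sketch of passing list and map values on the command line; the variable names `zones` and `amis` are hypothetical examples and not part of this patch:

```
$ terraform apply \
  -var 'zones=["us-east-1a", "us-east-1b"]' \
  -var 'amis={ us-east-1 = "ami-13be557e", us-west-2 = "ami-06b94666" }'
```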
diff --git a/website/source/docs/commands/output.html.markdown b/website/source/docs/commands/output.html.markdown index b284c79a5..7ddd7546a 100644 --- a/website/source/docs/commands/output.html.markdown +++ b/website/source/docs/commands/output.html.markdown @@ -13,16 +13,16 @@ an output variable from the state file. ## Usage -Usage: `terraform output [options] NAME` +Usage: `terraform output [options] [NAME]` -By default, `output` requires only a variable name and looks in the -current directory for the state file to query. +With no additional arguments, `output` will display all the outputs for the root module. +If an output `NAME` is specified, only the value of that output is printed. The command-line flags are all optional. The list of available flags are: * `-json` - If specified, the outputs are formatted as a JSON object, with - a key per output. This can be piped into tools such as `jq` for further - processing. + a key per output. If `NAME` is specified, only the output specified will be + returned. This can be piped into tools such as `jq` for further processing. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". * `-module=module_name` - The module path which has needed output. By default this is the root path. Other modules can be specified by diff --git a/website/source/docs/commands/plan.html.markdown b/website/source/docs/commands/plan.html.markdown index 8100a83f3..df0987312 100644 --- a/website/source/docs/commands/plan.html.markdown +++ b/website/source/docs/commands/plan.html.markdown @@ -57,12 +57,16 @@ The command-line flags are all optional. The list of available flags are: be limited to this resource and its dependencies. This flag can be used multiple times. -* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This - flag can be set multiple times. +* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag + can be set multiple times. Variable values are interpreted as + [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be + specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be automatically - loaded if this flag is not specified. This flag can be used multiple times. + a [variable file](/docs/configuration/variables.html#variable-files). If + "terraform.tfvars" is present, it will be automatically loaded first. Any + files specified by `-var-file` override any values in a "terraform.tfvars". + This flag can be used multiple times. ## Security Warning diff --git a/website/source/docs/commands/push.html.markdown b/website/source/docs/commands/push.html.markdown index 8b2f59907..eacf83488 100644 --- a/website/source/docs/commands/push.html.markdown +++ b/website/source/docs/commands/push.html.markdown @@ -59,7 +59,7 @@ The command-line flags are all optional. The list of available flags are: send the local value to Atlas. This flag can be repeated multiple times. * `-token=` - Atlas API token to use to authorize the upload. - If blank or unspecified, the `ATLAS_TOKEN` environmental variable + If blank or unspecified, the `ATLAS_TOKEN` environment variable will be used. * `-var='foo=bar'` - Set the value of a variable for the Terraform configuration. 
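As a hedged illustration of the `-json` flag described in the `output` documentation above, one way the result might be piped into `jq`; the output name `address` and the `.value` field are assumptions about the JSON layout rather than anything this patch specifies:

```
$ terraform output -json | jq -r '.address.value'
```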
diff --git a/website/source/docs/commands/refresh.html.markdown b/website/source/docs/commands/refresh.html.markdown index 10db2d7f9..064583437 100644 --- a/website/source/docs/commands/refresh.html.markdown +++ b/website/source/docs/commands/refresh.html.markdown @@ -41,11 +41,13 @@ The command-line flags are all optional. The list of available flags are: be limited to this resource and its dependencies. This flag can be used multiple times. -* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This - flag can be set multiple times. +* `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag + can be set multiple times. Variable values are interpreted as + [HCL](/docs/configuration/syntax.html#HCL), so list and map values can be + specified via this flag. * `-var-file=foo` - Set variables in the Terraform configuration from - a file. If "terraform.tfvars" is present, it will be automatically - loaded if this flag is not specified. This flag can be used multiple times. - - + a [variable file](/docs/configuration/variables.html#variable-files). If + "terraform.tfvars" is present, it will be automatically loaded first. Any + files specified by `-var-file` override any values in a "terraform.tfvars". + This flag can be used multiple times. diff --git a/website/source/docs/configuration/environment-variables.html.md b/website/source/docs/configuration/environment-variables.html.md index 8182f6907..30e3b421a 100644 --- a/website/source/docs/configuration/environment-variables.html.md +++ b/website/source/docs/configuration/environment-variables.html.md @@ -59,6 +59,8 @@ Environment variables can be used to set variables. The environment variables mu ``` export TF_VAR_region=us-west-1 export TF_VAR_ami=ami-049d8641 +export TF_VAR_alist='[1,2,3]' +export TF_VAR_amap='{ foo = "bar", baz = "qux" }' ``` For more on how to use `TF_VAR_name` in context, check out the section on [Variable Configuration](/docs/configuration/variables.html). diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 71b9e150b..2d3e42672 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -9,14 +9,13 @@ description: |- # Interpolation Syntax Embedded within strings in Terraform, whether you're using the -Terraform syntax or JSON syntax, you can interpolate other values -into strings. These interpolations are wrapped in `${}`, such as -`${var.foo}`. +Terraform syntax or JSON syntax, you can interpolate other values. These +interpolations are wrapped in `${}`, such as `${var.foo}`. The interpolation syntax is powerful and allows you to reference variables, attributes of resources, call functions, etc. -You can also perform simple math in interpolations, allowing +You can also perform [simple math](#math) in interpolations, allowing you to write expressions such as `${count.index + 1}`. You can escape interpolation with double dollar signs: `$${foo}` @@ -26,11 +25,11 @@ will be rendered as a literal `${foo}`. **To reference user variables**, use the `var.` prefix followed by the variable name. For example, `${var.foo}` will interpolate the -`foo` variable value. If the variable is a mapping, then you +`foo` variable value. If the variable is a map, then you can reference static keys in the map with the syntax `var.MAP.KEY`. 
For example, `${var.amis.us-east-1}` would get the value of the `us-east-1` key within the `amis` variable -that is a mapping. +that is a map. **To reference attributes of your own resource**, the syntax is `self.ATTRIBUTE`. For example `${self.private_ip_address}` will @@ -177,7 +176,7 @@ The supported built-in functions are: * `${list("a", "b", "c")}` returns a list of `"a", "b", "c"`. * `${list()}` returns an empty list. - * `lookup(map, key [, default])` - Performs a dynamic lookup into a mapping + * `lookup(map, key [, default])` - Performs a dynamic lookup into a map variable. The `map` parameter should be another variable, such as `var.amis`. If `key` does not exist in `map`, the interpolation will fail unless you specify a third argument, `default`, which should be a @@ -306,6 +305,8 @@ use in combination with our list of `aws_instance.web` resources. ## Math + + Simple math can be performed in interpolations: ``` diff --git a/website/source/docs/configuration/modules.html.md b/website/source/docs/configuration/modules.html.md index 0cc47461a..3d94b41d0 100644 --- a/website/source/docs/configuration/modules.html.md +++ b/website/source/docs/configuration/modules.html.md @@ -45,10 +45,10 @@ in the [module section](/docs/modules/index.html). Other configuration within the module are dependent on the module itself. -Because module configuration maps directly to -[variables](/docs/configuration/variables.html) within the module, they -are always simple key and string values. Complex structures are not used -for modules. +Module configuration maps directly to +[variables](/docs/configuration/variables.html) within the module, so +parameters can have any of the data types that variables support, including +lists and maps. ## Syntax diff --git a/website/source/docs/configuration/outputs.html.md b/website/source/docs/configuration/outputs.html.md index 762e00fcb..f3bc68e6b 100644 --- a/website/source/docs/configuration/outputs.html.md +++ b/website/source/docs/configuration/outputs.html.md @@ -30,7 +30,7 @@ An output configuration looks like the following: ``` output "address" { - value = "${aws_instance.web.public_dns}" + value = "${aws_instance.web.public_dns}" } ``` @@ -44,9 +44,11 @@ the output variable. Within the block (the `{ }`) is configuration for the output. These are the parameters that can be set: - * `value` (required, string) - The value of the output. This must - be a string. This usually includes an interpolation since outputs - that are static aren't usually useful. + * `value` (required) - The value of the output. This can be a string, list, + or map. This usually includes an interpolation since outputs that are + static aren't usually useful. + + * `sensitive` (optional, boolean) - See below. ## Syntax @@ -54,7 +56,7 @@ The full syntax is: ``` output NAME { - value = VALUE + value = VALUE } ``` @@ -65,8 +67,8 @@ Outputs can be marked as containing sensitive material by setting the ``` output "sensitive" { - sensitive = true - value = VALUE + sensitive = true + value = VALUE } ``` @@ -80,4 +82,4 @@ displayed in place of their value. state, and available using the `terraform output` command, so cannot be relied on as a sole means of protecting values. * Sensitivity is not tracked internally, so if the output is interpolated in - another module into a resource, the value will be displayed. + another module into a resource, the value will be displayed. 
diff --git a/website/source/docs/configuration/resources.html.md b/website/source/docs/configuration/resources.html.md index 07cde03a6..8606ef640 100644 --- a/website/source/docs/configuration/resources.html.md +++ b/website/source/docs/configuration/resources.html.md @@ -122,7 +122,8 @@ When declaring multiple instances of a resource using [`count`](#count), it is common to want each instance to have a different value for a given attribute. You can use the `${count.index}` -[interpolation](/docs/configuration/interpolation.html) along with a mapping [variable](/docs/configuration/variables.html) to accomplish this. +[interpolation](/docs/configuration/interpolation.html) along with a map +[variable](/docs/configuration/variables.html) to accomplish this. For example, here's how you could create three [AWS Instances](/docs/providers/aws/r/instance.html) each with their own static IP address: diff --git a/website/source/docs/configuration/syntax.html.md b/website/source/docs/configuration/syntax.html.md index fee54f875..fc9be1faf 100644 --- a/website/source/docs/configuration/syntax.html.md +++ b/website/source/docs/configuration/syntax.html.md @@ -8,32 +8,35 @@ description: |- # Configuration Syntax -The syntax of Terraform configurations is custom. It is meant to -strike a balance between human readable and editable as well as being -machine-friendly. For machine-friendliness, Terraform can also -read JSON configurations. For general Terraform configurations, -however, we recommend using the Terraform syntax. + + +The syntax of Terraform configurations is called [HashiCorp Configuration +Language (HCL)](https://github.com/hashicorp/hcl). It is meant to strike a +balance between human readable and editable as well as being machine-friendly. +For machine-friendliness, Terraform can also read JSON configurations. For +general Terraform configurations, however, we recommend using the HCL Terraform +syntax. ## Terraform Syntax -Here is an example of Terraform syntax: +Here is an example of Terraform's HCL syntax: ``` # An AMI variable "ami" { - description = "the AMI to use" + description = "the AMI to use" } /* A multi line comment. */ resource "aws_instance" "web" { - ami = "${var.ami}" - count = 2 - source_dest_check = false + ami = "${var.ami}" + count = 2 + source_dest_check = false - connection { - user = "root" - } + connection { + user = "root" + } } ``` @@ -44,15 +47,14 @@ Basic bullet point reference: * Multi-line comments are wrapped with `/*` and `*/` * Values are assigned with the syntax of `key = value` (whitespace - doesn't matter). The value can be any primitive: a string, - number, or boolean. + doesn't matter). The value can be any primitive (string, + number, boolean), a list, or a map. * Strings are in double-quotes. * Strings can interpolate other values using syntax wrapped in `${}`, such as `${var.foo}`. The full syntax for interpolation - is - [documented here](/docs/configuration/interpolation.html). + is [documented here](/docs/configuration/interpolation.html). * Multiline strings can use shell-style "here doc" syntax, with the string starting with a marker like `< + Variables can be collected in files and passed all at once using the `-var-file=foo.tfvars` flag. 
The format for variables in `.tfvars` -files is: +files is [HCL](/docs/configuration/syntax.html#HCL), with top level key/value +pairs: + ``` foo = "bar" xyz = "abc" +somelist = [ + "one", + "two", +] +somemap = { + foo = "bar" + bax = "qux" +} ``` The flag can be used multiple times per command invocation: @@ -167,8 +219,8 @@ The flag can be used multiple times per command invocation: terraform apply -var-file=foo.tfvars -var-file=bar.tfvars ``` -**Note** If a variable is defined in more than one file passed, the last -variable file (reading left to right) will be the definition used. Put more +**Note** If a variable is defined in more than one file passed, the last +variable file (reading left to right) will be the definition used. Put more simply, the last time a variable is defined is the one which will be used. ### Precedence example: @@ -193,5 +245,3 @@ terraform apply -var-file=foo.tfvars -var-file=bar.tfvars The result will be that `baz` will contain the value `bar` because `bar.tfvars` has the last definition loaded. - - diff --git a/website/source/docs/internals/debugging.html.md b/website/source/docs/internals/debugging.html.md index cd50edf5e..b19000b39 100644 --- a/website/source/docs/internals/debugging.html.md +++ b/website/source/docs/internals/debugging.html.md @@ -3,12 +3,12 @@ layout: "docs" page_title: "Debugging" sidebar_current: "docs-internals-debug" description: |- - Terraform has detailed logs which can be enabled by setting the TF_LOG environmental variable to any value. This will cause detailed logs to appear on stderr + Terraform has detailed logs which can be enabled by setting the TF_LOG environment variable to any value. This will cause detailed logs to appear on stderr --- # Debugging Terraform -Terraform has detailed logs which can be enabled by setting the `TF_LOG` environmental variable to any value. This will cause detailed logs to appear on stderr. +Terraform has detailed logs which can be enabled by setting the `TF_LOG` environment variable to any value. This will cause detailed logs to appear on stderr. You can set `TF_LOG` to one of the log levels `TRACE`, `DEBUG`, `INFO`, `WARN` or `ERROR` to change the verbosity of the logs. `TRACE` is the most verbose and it is the default if `TF_LOG` is set to something other than a log level name. diff --git a/website/source/docs/modules/usage.html.markdown b/website/source/docs/modules/usage.html.markdown index 8dfb742fb..b36e6d89b 100644 --- a/website/source/docs/modules/usage.html.markdown +++ b/website/source/docs/modules/usage.html.markdown @@ -11,8 +11,8 @@ Using modules in Terraform is very similar to defining resources: ``` module "consul" { - source = "github.com/hashicorp/consul/terraform/aws" - servers = 3 + source = "github.com/hashicorp/consul/terraform/aws" + servers = 3 } ``` @@ -36,12 +36,12 @@ You can instantiate a module multiple times. # my_buckets.tf module "assets_bucket" { source = "./publish_bucket" - name = "assets" + name = "assets" } module "media_bucket" { source = "./publish_bucket" - name = "media" + name = "media" } ``` ``` @@ -50,10 +50,12 @@ module "media_bucket" { variable "name" {} # this is the input parameter of the module resource "aws_s3_bucket" "the_bucket" { -... + # ... +} resource "aws_iam_user" "deploy_user" { -... + # ... +} ``` In this example you can provide module implementation in the `./publish_bucket` @@ -71,7 +73,7 @@ The full name of the resulting resources will be `module.assets_bucket.aws_s3_bu and `module.assets_bucket.aws_iam_access_key.deploy_user`. 
So beware, if you extract your implementation to a module. The resource names will change and this will lead to destroying s3 buckets and creating new ones - so always -check with `tf plan` before running `tf apply`. +check with `tf plan` before running `tf apply`. ## Source @@ -101,28 +103,8 @@ above, map directly to [variables](/docs/configuration/variables.html) within the module itself. Therefore, you can quickly discover all the configuration for a module by inspecting the source of it very easily. -Additionally, because these map directly to variables, they're always simple -key/value pairs. Modules can't have complex variable inputs. - -## Dealing with parameters of the list type - -Variables are currently unable to hold the list type. Sometimes, though, it's -desirable to parameterize a module's resource with an attribute that is of the -list type, for example `aws_instance.security_groups`. - -Until a future release broadens the functionality of variables to include list -types, the way to work around this limitation is to pass a delimited string as -a module parameter, and then "unpack" that parameter using -[`split`](/docs/configuration/interpolation.html) interpolation function within -the module definition. - -Depending on the resource parameter in question, you may have to -indicate that the unpacked string is actually a list by using list notation. -For example: - -``` -resource_param = ["${split(",", var.CSV_STRING)}"] -``` +Additionally, because these map directly to variables, module configuration can +have any data type supported by variables, including maps and lists. ## Outputs @@ -132,8 +114,8 @@ For example: ``` resource "aws_instance" "client" { - ami = "ami-408c7f28" - instance_type = "t1.micro" + ami = "ami-408c7f28" + instance_type = "t1.micro" availability_zone = "${module.consul.server_availability_zone}" } ``` diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown index c4beac8cc..4dbffefd8 100644 --- a/website/source/docs/providers/docker/r/container.html.markdown +++ b/website/source/docs/providers/docker/r/container.html.markdown @@ -48,7 +48,7 @@ The following arguments are supported: * `dns` - (Optional, set of strings) Set of DNS servers. * `dns_opts` - (Optional, set of strings) Set of DNS options used by the DNS provider(s), see `resolv.conf` documentation for valid list of options. * `dns_search` - (Optional, set of strings) Set of DNS search domains that are used when bare unqualified hostnames are used inside of the container. -* `env` - (Optional, set of strings) Environmental variables to set. +* `env` - (Optional, set of strings) Environment variables to set. * `labels` - (Optional, map of strings) Key/value pairs to set as labels on the container. * `links` - (Optional, set of strings) Set of links for link based diff --git a/website/source/intro/getting-started/variables.html.md b/website/source/intro/getting-started/variables.html.md index 156d39fcc..5c5b86f98 100644 --- a/website/source/intro/getting-started/variables.html.md +++ b/website/source/intro/getting-started/variables.html.md @@ -25,7 +25,7 @@ since Terraform loads all files ending in `.tf` in a directory. 
variable "access_key" {} variable "secret_key" {} variable "region" { - default = "us-east-1" + default = "us-east-1" } ``` @@ -41,9 +41,9 @@ Next, replace the AWS provider configuration with the following: ``` provider "aws" { - access_key = "${var.access_key}" - secret_key = "${var.secret_key}" - region = "${var.region}" + access_key = "${var.access_key}" + secret_key = "${var.secret_key}" + region = "${var.region}" } ``` @@ -57,11 +57,6 @@ There are multiple ways to assign variables. Below is also the order in which variable values are chosen. If they're found in an option first below, then the options below are ignored. -**UI Input:** If you execute `terraform plan` or apply without doing -anything, Terraform will ask you to input the variables interactively. -These variables are not saved, but provides a nice user experience for -getting started with Terraform. - **Command-line flags:** You can set it directly on the command-line with the `-var` flag. Any command in Terraform that inspects the configuration accepts this flag, such as `apply`, `plan`, and `refresh`: @@ -107,38 +102,51 @@ $ terraform plan \ -var-file="production.tfvars" ``` +**UI Input:** If you execute `terraform plan` or apply without doing +anything, Terraform will ask you to input the variables interactively. +These variables are not saved, but provides a nice user experience for +getting started with Terraform. (UI Input is only supported for string +variables - list and map variables must be populated via one of the +other mechanisms. + +**Variable Defaults**: If no value is assigned to a variable via any of these +methods and the variable has a `default` key in its declaration, that value +will be used for the variable. + -## Mappings + +## Maps We've replaced our sensitive strings with variables, but we still are hardcoding AMIs. Unfortunately, AMIs are specific to the region that is in use. One option is to just ask the user to input the proper AMI for the region, but Terraform can do better than that with -_mappings_. +_maps_. -Mappings are a way to create variables that are lookup tables. An example -will show this best. Let's extract our AMIs into a mapping and add +Maps are a way to create variables that are lookup tables. An example +will show this best. Let's extract our AMIs into a map and add support for the "us-west-2" region as well: ``` variable "amis" { - type = "map" - default = { - us-east-1 = "ami-13be557e" - us-west-2 = "ami-06b94666" - } + type = "map" + default = { + us-east-1 = "ami-13be557e" + us-west-2 = "ami-06b94666" + } } ``` -A variable becomes a mapping when it has a type of "map" assigned, or has a -default value that is a map like above. +A variable can have a "map" type assigned explicitly, or it can be implicitly +declared as a map by specifying a default value that is a map. The above +demonstrates both. Then, replace the "aws\_instance" with the following: ``` resource "aws_instance" "example" { - ami = "${lookup(var.amis, var.region)}" - instance_type = "t2.micro" + ami = "${lookup(var.amis, var.region)}" + instance_type = "t2.micro" } ``` @@ -148,39 +156,40 @@ key is `var.region`, which specifies that the value of the region variables is the key. While we don't use it in our example, it is worth noting that you -can also do a static lookup of a mapping directly with +can also do a static lookup of a map directly with `${var.amis["us-east-1"]}`. 
- -## Assigning Mappings + +## Assigning Maps -We set defaults above, but mappings can also be set using the `-var` and -`-var-file` values. For example, if the user wanted to specify an alternate AMI -for us-east-1: +We set defaults above, but maps can also be set using the `-var` and +`-var-file` values. For example: ``` -$ terraform plan -var 'amis.us-east-1=foo' +$ terraform plan -var 'amis={ us-east-1 = "foo", us-west-2 = "bar" }' ... ``` **Note**: even if every key will be assigned as input, the variable must be -established as a mapping by setting its default to `{}`. +established as a map by setting its default to `{}`. -Here is an example of setting a mapping's keys from a file. Starting with these +Here is an example of setting a map's keys from a file. Starting with these variable definitions: ``` variable "region" {} variable "amis" { - default = {} + type = "map" } ``` You can specify keys in a `terraform.tfvars` file: ``` -amis.us-east-1 = "ami-abc123" -amis.us-west-2 = "ami-def456" +amis = { + us-east-1 = "ami-abc123" + us-west-2 = "ami-def456" +} ``` And access them via `lookup()`: @@ -207,7 +216,7 @@ Outputs: ## Next Terraform provides variables for parameterizing your configurations. -Mappings let you build lookup tables in cases where that makes sense. +Maps let you build lookup tables in cases where that makes sense. Setting and using variables is uniform throughout your configurations. In the next section, we'll take a look at From 3d1802d5475a640151e3a75575828a90a27fceb7 Mon Sep 17 00:00:00 2001 From: Greg Aker Date: Thu, 28 Jul 2016 16:57:33 -0500 Subject: [PATCH 0473/1238] Add default value & acceptance test. --- .../resource_compute_backend_service.go | 1 + .../resource_compute_backend_service_test.go | 43 +++++++++++++++++++ 2 files changed, 44 insertions(+) diff --git a/builtin/providers/google/resource_compute_backend_service.go b/builtin/providers/google/resource_compute_backend_service.go index bef3e5b60..706e20f80 100644 --- a/builtin/providers/google/resource_compute_backend_service.go +++ b/builtin/providers/google/resource_compute_backend_service.go @@ -91,6 +91,7 @@ func resourceComputeBackendService() *schema.Resource { "enable_cdn": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: false, }, "fingerprint": &schema.Schema{ diff --git a/builtin/providers/google/resource_compute_backend_service_test.go b/builtin/providers/google/resource_compute_backend_service_test.go index 845be9c73..01b0d3d38 100644 --- a/builtin/providers/google/resource_compute_backend_service_test.go +++ b/builtin/providers/google/resource_compute_backend_service_test.go @@ -121,6 +121,32 @@ func testAccCheckComputeBackendServiceExists(n string, svc *compute.BackendServi } } +func TestAccComputeBackendService_withCDNEnabled(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withCDNEnabled( + serviceName, checkName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.foobar", &svc), + ), + }, + }, + }) + + if svc.EnableCDN != true { + t.Errorf("Expected EnableCDN == true, got %t", svc.EnableCDN) + } +} + func 
testAccComputeBackendService_basic(serviceName, checkName string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { @@ -137,6 +163,23 @@ resource "google_compute_http_health_check" "zero" { `, serviceName, checkName) } +func testAccComputeBackendService_withCDNEnabled(serviceName, checkName string) string { + return fmt.Sprintf(` +resource "google_compute_backend_service" "foobar" { + name = "%s" + health_checks = ["${google_compute_http_health_check.zero.self_link}"] + enable_cdn = true +} + +resource "google_compute_http_health_check" "zero" { + name = "%s" + request_path = "/" + check_interval_sec = 1 + timeout_sec = 1 +} +`, serviceName, checkName) +} + func testAccComputeBackendService_basicModified(serviceName, checkOne, checkTwo string) string { return fmt.Sprintf(` resource "google_compute_backend_service" "foobar" { From 37b7a22db904a0017b263c7c2e36a4bb668f8594 Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 29 Jul 2016 09:11:56 +0100 Subject: [PATCH 0474/1238] provider/aws: Bump SDK package version to v1.2.10 --- .../aws/aws-sdk-go/aws/awserr/error.go | 4 +- .../aws/aws-sdk-go/aws/awserr/types.go | 2 +- .../github.com/aws/aws-sdk-go/aws/config.go | 6 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../private/protocol/ec2query/build.go | 2 +- .../private/protocol/json/jsonutil/build.go | 2 +- .../private/protocol/jsonrpc/jsonrpc.go | 2 +- .../private/protocol/query/build.go | 2 +- .../private/protocol/restjson/restjson.go | 2 +- .../private/protocol/restxml/restxml.go | 2 +- .../private/protocol/xml/xmlutil/build.go | 2 +- .../aws/aws-sdk-go/service/apigateway/api.go | 13 +- .../service/applicationautoscaling/api.go | 10 +- .../service/applicationautoscaling/service.go | 10 + .../aws/aws-sdk-go/service/cloudwatch/api.go | 149 +++-- .../aws-sdk-go/service/codedeploy/waiters.go | 42 ++ .../service/directoryservice/api.go | 451 +++++++++++++- .../aws/aws-sdk-go/service/ec2/api.go | 87 +-- .../service/elasticsearchservice/api.go | 44 ++ .../service/elasticsearchservice/service.go | 2 +- .../service/route53/unmarshal_error.go | 2 +- .../aws/aws-sdk-go/service/s3/api.go | 2 +- .../aws-sdk-go/service/s3/unmarshal_error.go | 36 +- .../aws/aws-sdk-go/service/ses/api.go | 6 +- .../aws/aws-sdk-go/service/sts/api.go | 58 +- vendor/vendor.json | 570 +++++++++--------- 26 files changed, 1055 insertions(+), 455 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/codedeploy/waiters.go diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index e50771f80..56fdfc2bf 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -44,7 +44,7 @@ type Error interface { // BatchError is a batch of errors which also wraps lower level errors with // code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. +// that occurred in the batch. // // Deprecated: Replaced with BatchedErrors. Only defined for backwards // compatibility. @@ -64,7 +64,7 @@ type BatchError interface { // BatchedErrors is a batch of errors which also wraps lower level errors with // code, message, and original errors. Calling Error() will include all errors -// that occured in the batch. +// that occurred in the batch. 
// // Replaces BatchError type BatchedErrors interface { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index e2d333b84..0202a008f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -98,7 +98,7 @@ func (b baseError) OrigErr() error { return NewBatchError(err.Code(), err.Message(), b.errs[1:]) } return NewBatchError("BatchedErrors", - "multiple errors occured", b.errs) + "multiple errors occurred", b.errs) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index da72935be..d3e889514 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -19,7 +19,7 @@ type RequestRetryer interface{} // all clients will use the {defaults.DefaultConfig} structure. type Config struct { // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to retreive + // Should be used when wanting to see all errors while attempting to retrieve // credentials. CredentialsChainVerboseErrors *bool @@ -112,8 +112,8 @@ type Config struct { // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. // https://golang.org/pkg/net/http/#Transport // - // You should use this flag to disble 100-Continue if you experiance issues - // with proxies or thrid party S3 compatible services. + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. S3Disable100Continue *bool // Set this to `true` to enable S3 Accelerate feature. For all operations compatible diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 438218867..90df0b23b 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.2.7" +const SDKVersion = "1.2.10" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go index 68e344d1f..bb0dae97a 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/ec2query/build.go @@ -1,4 +1,4 @@ -// Package ec2query provides serialisation of AWS EC2 requests and responses. +// Package ec2query provides serialization of AWS EC2 requests and responses. package ec2query //go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go index 7ad674278..aedc4440f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go @@ -1,4 +1,4 @@ -// Package jsonutil provides JSON serialisation of AWS requests and responses. +// Package jsonutil provides JSON serialization of AWS requests and responses. 
package jsonutil import ( diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go index 7aff0e0fa..d5490cd7f 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go @@ -1,4 +1,4 @@ -// Package jsonrpc provides JSON RPC utilities for serialisation of AWS +// Package jsonrpc provides JSON RPC utilities for serialization of AWS // requests and responses. package jsonrpc diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index 56d69db05..c705481c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,4 +1,4 @@ -// Package query provides serialisation of AWS query requests, and responses. +// Package query provides serialization of AWS query requests, and responses. package query //go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go index 2c95a9858..9e98525bb 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restjson/restjson.go @@ -1,4 +1,4 @@ -// Package restjson provides RESTful JSON serialisation of AWS +// Package restjson provides RESTful JSON serialization of AWS // requests and responses. package restjson diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go index c74088bfe..c74b97e17 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -1,4 +1,4 @@ -// Package restxml provides RESTful XML serialisation of AWS +// Package restxml provides RESTful XML serialization of AWS // requests and responses. package restxml diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index ceb4132c5..221029baf 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -1,4 +1,4 @@ -// Package xmlutil provides XML serialisation of AWS requests and responses. +// Package xmlutil provides XML serialization of AWS requests and responses. package xmlutil import ( diff --git a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go index 4755978d7..f0a48786d 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/apigateway/api.go @@ -4110,6 +4110,8 @@ type Authorizer struct { // [Required] The name of the authorizer. Name *string `locationName:"name" type:"string"` + ProviderARNs []*string `locationName:"providerARNs" type:"list"` + // [Required] The type of the authorizer. Currently, the only valid type is // TOKEN. 
Type *string `locationName:"type" type:"string" enum:"AuthorizerType"` @@ -4227,7 +4229,7 @@ type CreateAuthorizerInput struct { AuthorizerResultTtlInSeconds *int64 `locationName:"authorizerResultTtlInSeconds" type:"integer"` // [Required] Specifies the authorizer's Uniform Resource Identifier (URI). - AuthorizerUri *string `locationName:"authorizerUri" type:"string" required:"true"` + AuthorizerUri *string `locationName:"authorizerUri" type:"string"` // [Required] The source of the identity in an incoming request. IdentitySource *string `locationName:"identitySource" type:"string" required:"true"` @@ -4238,6 +4240,8 @@ type CreateAuthorizerInput struct { // [Required] The name of the authorizer. Name *string `locationName:"name" type:"string" required:"true"` + ProviderARNs []*string `locationName:"providerARNs" type:"list"` + // The RestApi identifier under which the Authorizer will be created. RestApiId *string `location:"uri" locationName:"restapi_id" type:"string" required:"true"` @@ -4258,9 +4262,6 @@ func (s CreateAuthorizerInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *CreateAuthorizerInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateAuthorizerInput"} - if s.AuthorizerUri == nil { - invalidParams.Add(request.NewErrParamRequired("AuthorizerUri")) - } if s.IdentitySource == nil { invalidParams.Add(request.NewErrParamRequired("IdentitySource")) } @@ -7777,6 +7778,8 @@ type TestInvokeAuthorizerOutput struct { Authorization map[string][]*string `locationName:"authorization" type:"map"` + Claims map[string]*string `locationName:"claims" type:"map"` + // The HTTP status code that the client would have received. Value is 0 if the // authorizer succeeded. ClientStatus *int64 `locationName:"clientStatus" type:"integer"` @@ -8530,6 +8533,8 @@ func (s *UpdateStageInput) Validate() error { const ( // @enum AuthorizerType AuthorizerTypeToken = "TOKEN" + // @enum AuthorizerType + AuthorizerTypeCognitoUserPools = "COGNITO_USER_POOLS" ) // Returns the size of the CacheCluster. diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go index f2b3fe245..754ff5d36 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go @@ -254,7 +254,7 @@ func (c *ApplicationAutoScaling) DescribeScalingActivitiesRequest(input *Describ } // Provides descriptive information for scaling activities with a specified -// service namespace. +// service namespace for the previous six weeks. // // You can filter the results in a service namespace with the ResourceId and // ScalableDimension parameters. @@ -928,7 +928,8 @@ type PutScalingPolicyInput struct { // The name of the scaling policy. PolicyName *string `min:"1" type:"string" required:"true"` - // The policy type. This parameter is required if you are creating a new policy. + // The policy type. If you are creating a new policy, this parameter is required. + // If you are updating an existing policy, this parameter is not required. PolicyType *string `type:"string" enum:"PolicyType"` // The unique resource identifier string for the scalable target that this scaling @@ -947,8 +948,9 @@ type PutScalingPolicyInput struct { // in the Amazon Web Services General Reference. 
ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - // The configuration for the step scaling policy. This parameter is required - // if you are creating a new policy. For more information, see StepScalingPolicyConfiguration + // The configuration for the step scaling policy. If you are creating a new + // policy, this parameter is required. If you are updating an existing policy, + // this parameter is not required. For more information, see StepScalingPolicyConfiguration // and StepAdjustment. StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go index d6e797ff2..c56efaded 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go @@ -32,8 +32,18 @@ import ( // // us-east-1 // +// us-west-1 +// // us-west-2 // +// ap-southeast-1 +// +// ap-southeast-2 +// +// ap-northeast-1 +// +// eu-central-1 +// // eu-west-1 //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go index e23db4766..79958fea3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatch/api.go @@ -426,8 +426,10 @@ func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) // request is 1,440. If you make a request that generates more than 1,440 data // points, Amazon CloudWatch returns an error. In such a case, you can alter // the request by narrowing the specified time range or increasing the specified -// period. Alternatively, you can make multiple requests across adjacent time -// ranges. GetMetricStatistics does not return the data in chronological order. +// period. A period can be as short as one minute (60 seconds) or as long as +// one day (86,400 seconds). Alternatively, you can make multiple requests across +// adjacent time ranges. GetMetricStatistics does not return the data in chronological +// order. // // Amazon CloudWatch aggregates data points based on the length of the period // that you specify. 
For example, if you request statistics with a one-minute @@ -439,12 +441,15 @@ func (c *CloudWatch) GetMetricStatisticsRequest(input *GetMetricStatisticsInput) // query maximum of 50,850 when you call GetMetricStatistics on Amazon EC2 instances // with detailed (one-minute) monitoring enabled: // -// Statistics for up to 400 instances for a span of one hour Statistics for -// up to 35 instances over a span of 24 hours Statistics for up to 2 instances -// over a span of 2 weeks For information about the namespace, metric names, -// and dimensions that other Amazon Web Services products use to send metrics -// to CloudWatch, go to Amazon CloudWatch Metrics, Namespaces, and Dimensions -// Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) +// Statistics for up to 400 instances for a span of one hour +// +// Statistics for up to 35 instances over a span of 24 hours +// +// Statistics for up to 2 instances over a span of 2 weeks +// +// For information about the namespace, metric names, and dimensions that +// other Amazon Web Services products use to send metrics to CloudWatch, go +// to Amazon CloudWatch Metrics, Namespaces, and Dimensions Reference (http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html) // in the Amazon CloudWatch Developer Guide. func (c *CloudWatch) GetMetricStatistics(input *GetMetricStatisticsInput) (*GetMetricStatisticsOutput, error) { req, out := c.GetMetricStatisticsRequest(input) @@ -503,11 +508,12 @@ func (c *CloudWatch) ListMetricsRequest(input *ListMetricsInput) (req *request.R // metrics can be used with GetMetricStatistics to obtain statistical data for // a given metric. // -// Up to 500 results are returned for any one call. To retrieve further results, -// use returned NextToken values with subsequent ListMetrics operations. If -// you create a metric with the PutMetricData action, allow up to fifteen minutes -// for the metric to appear in calls to the ListMetrics action. Statistics about -// the metric, however, are available sooner using GetMetricStatistics. +// Up to 500 results are returned for any one call. To retrieve further results, +// use returned NextToken values with subsequent ListMetrics operations. +// +// If you create a metric with PutMetricData, allow up to fifteen minutes +// for the metric to appear in calls to ListMetrics. Statistics about the metric, +// however, are available sooner using GetMetricStatistics. func (c *CloudWatch) ListMetrics(input *ListMetricsInput) (*ListMetricsOutput, error) { req, out := c.ListMetricsRequest(input) err := req.Send() @@ -583,26 +589,35 @@ func (c *CloudWatch) PutMetricAlarmRequest(input *PutMetricAlarmInput) (req *req } // Creates or updates an alarm and associates it with the specified Amazon CloudWatch -// metric. Optionally, this operation can associate one or more Amazon Simple -// Notification Service resources with the alarm. +// metric. Optionally, this operation can associate one or more Amazon SNS resources +// with the alarm. // // When this operation creates an alarm, the alarm state is immediately set // to INSUFFICIENT_DATA. The alarm is evaluated and its StateValue is set appropriately. -// Any actions associated with the StateValue is then executed. +// Any actions associated with the StateValue are then executed. // -// When updating an existing alarm, its StateValue is left unchanged. 
If -// you are using an AWS Identity and Access Management (IAM) account to create -// or modify an alarm, you must have the following Amazon EC2 permissions: -// ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on Amazon -// EC2 instance status metrics. ec2:StopInstances for alarms with stop actions. -// ec2:TerminateInstances for alarms with terminate actions. ec2:DescribeInstanceRecoveryAttribute, -// and ec2:RecoverInstances for alarms with recover actions. If you have read/write -// permissions for Amazon CloudWatch but not for Amazon EC2, you can still create -// an alarm but the stop or terminate actions won't be performed on the Amazon -// EC2 instance. However, if you are later granted permission to use the associated -// Amazon EC2 APIs, the alarm actions you created earlier will be performed. -// For more information about IAM permissions, see Permissions and Policies -// (http://docs.aws.amazon.com//IAM/latest/UserGuide/PermissionsAndPolicies.html) +// When updating an existing alarm, its StateValue is left unchanged, but +// it completely overwrites the alarm's previous configuration. +// +// If you are using an AWS Identity and Access Management (IAM) account to +// create or modify an alarm, you must have the following Amazon EC2 permissions: +// +// ec2:DescribeInstanceStatus and ec2:DescribeInstances for all alarms on +// Amazon EC2 instance status metrics. +// +// ec2:StopInstances for alarms with stop actions. +// +// ec2:TerminateInstances for alarms with terminate actions. +// +// ec2:DescribeInstanceRecoveryAttribute, and ec2:RecoverInstances for alarms +// with recover actions. +// +// If you have read/write permissions for Amazon CloudWatch but not for Amazon +// EC2, you can still create an alarm but the stop or terminate actions won't +// be performed on the Amazon EC2 instance. However, if you are later granted +// permission to use the associated Amazon EC2 APIs, the alarm actions you created +// earlier will be performed. For more information about IAM permissions, see +// Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html) // in Using IAM. // // If you are using an IAM role (e.g., an Amazon EC2 instance profile), you @@ -666,18 +681,19 @@ func (c *CloudWatch) PutMetricDataRequest(input *PutMetricDataInput) (req *reque // the data points with the specified metric. If the specified metric does not // exist, Amazon CloudWatch creates the metric. When Amazon CloudWatch creates // a metric, it can take up to fifteen minutes for the metric to appear in calls -// to the ListMetrics action. +// to ListMetrics. // // Each PutMetricData request is limited to 8 KB in size for HTTP GET requests // and is limited to 40 KB in size for HTTP POST requests. // -// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch +// Although the Value parameter accepts numbers of type Double, Amazon CloudWatch // rejects values that are either too small or too large. Values must be in // the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 // (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are -// not supported. Data that is timestamped 24 hours or more in the past may -// take in excess of 48 hours to become available from submission time using -// GetMetricStatistics. +// not supported. +// +// Data that is timestamped 24 hours or more in the past may take in excess +// of 48 hours to become available from submission time using GetMetricStatistics. 
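As a usage sketch of the PutMetricData behaviour documented above, publishing a single datapoint with the v1 SDK looks roughly like the following; the namespace, metric name, and dimension values are placeholders rather than anything defined by this change.

```
// Sketch: publish one datapoint. Values outside the documented range
// (8.515920e-109 to 1.174271e+108) or NaN/Infinity would be rejected,
// and each request is limited to 8 KB (GET) / 40 KB (POST).
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	svc := cloudwatch.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.PutMetricData(&cloudwatch.PutMetricDataInput{
		Namespace: aws.String("ExampleApp"),
		MetricData: []*cloudwatch.MetricDatum{
			{
				MetricName: aws.String("latency"),
				Unit:       aws.String(cloudwatch.StandardUnitMilliseconds),
				Value:      aws.Float64(42),
				Dimensions: []*cloudwatch.Dimension{
					{Name: aws.String("InstanceType"), Value: aws.String("t2.micro")},
				},
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```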
func (c *CloudWatch) PutMetricData(input *PutMetricDataInput) (*PutMetricDataOutput, error) { req, out := c.PutMetricDataRequest(input) err := req.Send() @@ -727,15 +743,14 @@ func (c *CloudWatch) SetAlarmStateRequest(input *SetAlarmStateInput) (req *reque return } -// Temporarily sets the state of an alarm. When the updated StateValue differs -// from the previous value, the action configured for the appropriate state -// is invoked. For example, if your alarm is configured to send an Amazon SNS -// message when an alarm is triggered, temporarily changing the alarm's state -// to ALARM will send an Amazon SNS message. This is not a permanent change. -// The next periodic alarm check (in about a minute) will set the alarm to its -// actual state. Because the alarm state change happens very quickly, it is -// typically only visibile in the alarm's History tab in the Amazon CloudWatch -// console or through DescribeAlarmHistory. +// Temporarily sets the state of an alarm for testing purposes. When the updated +// StateValue differs from the previous value, the action configured for the +// appropriate state is invoked. For example, if your alarm is configured to +// send an Amazon SNS message when an alarm is triggered, temporarily changing +// the alarm's state to ALARM sends an Amazon SNS message. The alarm returns +// to its actual state (often within seconds). Because the alarm state change +// happens very quickly, it is typically only visible in the alarm's History +// tab in the Amazon CloudWatch console or through DescribeAlarmHistory. func (c *CloudWatch) SetAlarmState(input *SetAlarmStateInput) (*SetAlarmStateOutput, error) { req, out := c.SetAlarmStateRequest(input) err := req.Send() @@ -812,6 +827,7 @@ func (s Datapoint) GoString() string { return s.String() } +// Describes the inputs for DeleteAlarms. type DeleteAlarmsInput struct { _ struct{} `type:"structure"` @@ -856,6 +872,7 @@ func (s DeleteAlarmsOutput) GoString() string { return s.String() } +// Describes the inputs for DescribeAlarmHistory. type DescribeAlarmHistoryInput struct { _ struct{} `type:"structure"` @@ -905,7 +922,7 @@ func (s *DescribeAlarmHistoryInput) Validate() error { return nil } -// The output for the DescribeAlarmHistory action. +// The output for DescribeAlarmHistory. type DescribeAlarmHistoryOutput struct { _ struct{} `type:"structure"` @@ -926,6 +943,7 @@ func (s DescribeAlarmHistoryOutput) GoString() string { return s.String() } +// Describes the inputs for DescribeAlarmsForMetric. type DescribeAlarmsForMetricInput struct { _ struct{} `type:"structure"` @@ -995,7 +1013,7 @@ func (s *DescribeAlarmsForMetricInput) Validate() error { return nil } -// The output for the DescribeAlarmsForMetric action. +// The output for DescribeAlarmsForMetric. type DescribeAlarmsForMetricOutput struct { _ struct{} `type:"structure"` @@ -1013,6 +1031,7 @@ func (s DescribeAlarmsForMetricOutput) GoString() string { return s.String() } +// Describes the inputs for DescribeAlarms. type DescribeAlarmsInput struct { _ struct{} `type:"structure"` @@ -1066,7 +1085,7 @@ func (s *DescribeAlarmsInput) Validate() error { return nil } -// The output for the DescribeAlarms action. +// The output for DescribeAlarms. type DescribeAlarmsOutput struct { _ struct{} `type:"structure"` @@ -1220,6 +1239,7 @@ func (s DisableAlarmActionsOutput) GoString() string { return s.String() } +// Describes the inputs for EnableAlarmActions. 
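And a matching sketch for the SetAlarmState semantics described above, useful when testing that an alarm's actions actually fire; the alarm name and reason are placeholder assumptions.

```
// Sketch: temporarily force an alarm into ALARM so its configured
// actions run; CloudWatch returns it to its real state on the next
// evaluation, as described above.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/cloudwatch"
)

func main() {
	svc := cloudwatch.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.SetAlarmState(&cloudwatch.SetAlarmStateInput{
		AlarmName:   aws.String("example-cpu-alarm"),
		StateValue:  aws.String(cloudwatch.StateValueAlarm),
		StateReason: aws.String("manually triggered for testing"),
	})
	if err != nil {
		log.Fatal(err)
	}
}
```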
type EnableAlarmActionsInput struct { _ struct{} `type:"structure"` @@ -1264,6 +1284,7 @@ func (s EnableAlarmActionsOutput) GoString() string { return s.String() } +// Describes the inputs for GetMetricStatistics. type GetMetricStatisticsInput struct { _ struct{} `type:"structure"` @@ -1281,8 +1302,9 @@ type GetMetricStatisticsInput struct { // The namespace of the metric, with or without spaces. Namespace *string `min:"1" type:"string" required:"true"` - // The granularity, in seconds, of the returned datapoints. Period must be at - // least 60 seconds and must be a multiple of 60. The default value is 60. + // The granularity, in seconds, of the returned datapoints. A Period can be + // as short as one minute (60 seconds) or as long as one day (86,400 seconds), + // and must be a multiple of 60. The default value is 60. Period *int64 `min:"60" type:"integer" required:"true"` // The time stamp to use for determining the first datapoint to return. The @@ -1292,9 +1314,10 @@ type GetMetricStatisticsInput struct { // The specified start time is rounded down to the nearest value. Datapoints // are returned for start times up to two weeks in the past. Specified start // times that are more than two weeks in the past will not return datapoints - // for metrics that are older than two weeks. Data that is timestamped 24 hours - // or more in the past may take in excess of 48 hours to become available from - // submission time using GetMetricStatistics. + // for metrics that are older than two weeks. + // + // Data that is timestamped 24 hours or more in the past may take in excess + // of 48 hours to become available from submission time using GetMetricStatistics. StartTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` // The metric statistics to return. For information about specific statistics @@ -1302,7 +1325,9 @@ type GetMetricStatisticsInput struct { // in the Amazon CloudWatch Developer Guide. Statistics []*string `min:"1" type:"list" required:"true"` - // The unit for the metric. + // The specific unit for a given metric. Metrics may be reported in multiple + // units. Not supplying a unit results in all units being returned. If the metric + // only ever reports one unit, specifying a unit will have no effect. Unit *string `type:"string" enum:"StandardUnit"` } @@ -1366,7 +1391,7 @@ func (s *GetMetricStatisticsInput) Validate() error { return nil } -// The output for the GetMetricStatistics action. +// The output for GetMetricStatistics. type GetMetricStatisticsOutput struct { _ struct{} `type:"structure"` @@ -1387,6 +1412,7 @@ func (s GetMetricStatisticsOutput) GoString() string { return s.String() } +// Describes the inputs for ListMetrics. type ListMetricsInput struct { _ struct{} `type:"structure"` @@ -1440,7 +1466,7 @@ func (s *ListMetricsInput) Validate() error { return nil } -// The output for the ListMetrics action. +// The output for ListMetrics. type ListMetricsOutput struct { _ struct{} `type:"structure"` @@ -1465,7 +1491,7 @@ func (s ListMetricsOutput) GoString() string { // call ListMetrics, Amazon CloudWatch returns information contained by this // data type. // -// The example in the Examples section publishes two metrics named buffers +// The example in the Examples section publishes two metrics named buffers // and latency. Both metrics are in the examples namespace. Both metrics have // two dimensions, InstanceID and InstanceType. type Metric struct { @@ -1531,7 +1557,7 @@ type MetricAlarm struct { // state from any other state. 
Each action is specified as an Amazon Resource // Name (ARN). // - // The current WSDL lists this attribute as UnknownActions. + // The current WSDL lists this attribute as UnknownActions. InsufficientDataActions []*string `type:"list"` // The name of the alarm's metric. @@ -1606,7 +1632,7 @@ type MetricDatum struct { // The value for the metric. // - // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch + // Although the Value parameter accepts numbers of type Double, Amazon CloudWatch // rejects values that are either too small or too large. Values must be in // the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 // (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are @@ -1655,6 +1681,7 @@ func (s *MetricDatum) Validate() error { return nil } +// Describes the inputs for PutMetricAlarm. type PutMetricAlarmInput struct { _ struct{} `type:"structure"` @@ -1673,7 +1700,7 @@ type PutMetricAlarmInput struct { // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 // - // Note: You must create at least one stop, terminate, or reboot alarm using + // Note: You must create at least one stop, terminate, or reboot alarm using // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role // for the first time. After this IAM role is created, you can create stop, // terminate, or reboot alarms using the CLI. @@ -1707,7 +1734,7 @@ type PutMetricAlarmInput struct { // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 // - // Note: You must create at least one stop, terminate, or reboot alarm using + // Note: You must create at least one stop, terminate, or reboot alarm using // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role // for the first time. After this IAM role is created, you can create stop, // terminate, or reboot alarms using the CLI. @@ -1730,7 +1757,7 @@ type PutMetricAlarmInput struct { // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Terminate/1.0 // | arn:aws:swf:us-east-1:{customer-account}:action/actions/AWS_EC2.InstanceId.Reboot/1.0 // - // Note: You must create at least one stop, terminate, or reboot alarm using + // Note: You must create at least one stop, terminate, or reboot alarm using // the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role // for the first time. After this IAM role is created, you can create stop, // terminate, or reboot alarms using the CLI. @@ -1841,6 +1868,7 @@ func (s PutMetricAlarmOutput) GoString() string { return s.String() } +// Describes the inputs for PutMetricData. type PutMetricDataInput struct { _ struct{} `type:"structure"` @@ -1908,6 +1936,7 @@ func (s PutMetricDataOutput) GoString() string { return s.String() } +// Describes the inputs for SetAlarmState. type SetAlarmStateInput struct { _ struct{} `type:"structure"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/codedeploy/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/waiters.go new file mode 100644 index 000000000..ce19f2247 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/codedeploy/waiters.go @@ -0,0 +1,42 @@ +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
+ +package codedeploy + +import ( + "github.com/aws/aws-sdk-go/private/waiter" +) + +func (c *CodeDeploy) WaitUntilDeploymentSuccessful(input *GetDeploymentInput) error { + waiterCfg := waiter.Config{ + Operation: "GetDeployment", + Delay: 15, + MaxAttempts: 120, + Acceptors: []waiter.WaitAcceptor{ + { + State: "success", + Matcher: "path", + Argument: "deploymentInfo.status", + Expected: "Succeeded", + }, + { + State: "failure", + Matcher: "path", + Argument: "deploymentInfo.status", + Expected: "Failed", + }, + { + State: "failure", + Matcher: "path", + Argument: "deploymentInfo.status", + Expected: "Stopped", + }, + }, + } + + w := waiter.Waiter{ + Client: c, + Input: input, + Config: waiterCfg, + } + return w.Wait() +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index 0cd145ee6..05f2efed4 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -11,6 +11,58 @@ import ( "github.com/aws/aws-sdk-go/aws/request" ) +const opAddIpRoutes = "AddIpRoutes" + +// AddIpRoutesRequest generates a "aws/request.Request" representing the +// client's request for the AddIpRoutes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AddIpRoutes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AddIpRoutesRequest method. +// req, resp := client.AddIpRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) AddIpRoutesRequest(input *AddIpRoutesInput) (req *request.Request, output *AddIpRoutesOutput) { + op := &request.Operation{ + Name: opAddIpRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddIpRoutesInput{} + } + + req = c.newRequest(op, input, output) + output = &AddIpRoutesOutput{} + req.Data = output + return +} + +// If the DNS server for your on-premises domain uses a publicly addressable +// IP address, you must add a CIDR address block to correctly route traffic +// to and from your Microsoft AD on Amazon Web Services. AddIpRoutes adds this +// address block. You can also use AddIpRoutes to facilitate routing traffic +// that uses public IP ranges from your Microsoft AD on AWS to a peer VPC. +func (c *DirectoryService) AddIpRoutes(input *AddIpRoutesInput) (*AddIpRoutesOutput, error) { + req, out := c.AddIpRoutesRequest(input) + err := req.Send() + return out, err +} + const opAddTagsToResource = "AddTagsToResource" // AddTagsToResourceRequest generates a "aws/request.Request" representing the @@ -54,7 +106,7 @@ func (c *DirectoryService) AddTagsToResourceRequest(input *AddTagsToResourceInpu // Adds or overwrites one or more tags for the specified Amazon Directory Services // directory. Each directory can have a maximum of 10 tags. Each tag consists -// of a key and optional value. Tag keys must be unique per resource. +// of a key and optional value. 
Tag keys must be unique to each resource. func (c *DirectoryService) AddTagsToResource(input *AddTagsToResourceInput) (*AddTagsToResourceOutput, error) { req, out := c.AddTagsToResourceRequest(input) err := req.Send() @@ -1261,6 +1313,54 @@ func (c *DirectoryService) GetSnapshotLimits(input *GetSnapshotLimitsInput) (*Ge return out, err } +const opListIpRoutes = "ListIpRoutes" + +// ListIpRoutesRequest generates a "aws/request.Request" representing the +// client's request for the ListIpRoutes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListIpRoutes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListIpRoutesRequest method. +// req, resp := client.ListIpRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) ListIpRoutesRequest(input *ListIpRoutesInput) (req *request.Request, output *ListIpRoutesOutput) { + op := &request.Operation{ + Name: opListIpRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListIpRoutesInput{} + } + + req = c.newRequest(op, input, output) + output = &ListIpRoutesOutput{} + req.Data = output + return +} + +// Lists the address blocks that you have added to a directory. +func (c *DirectoryService) ListIpRoutes(input *ListIpRoutesInput) (*ListIpRoutesOutput, error) { + req, out := c.ListIpRoutesRequest(input) + err := req.Send() + return out, err +} + const opListTagsForResource = "ListTagsForResource" // ListTagsForResourceRequest generates a "aws/request.Request" representing the @@ -1362,6 +1462,54 @@ func (c *DirectoryService) RegisterEventTopic(input *RegisterEventTopicInput) (* return out, err } +const opRemoveIpRoutes = "RemoveIpRoutes" + +// RemoveIpRoutesRequest generates a "aws/request.Request" representing the +// client's request for the RemoveIpRoutes operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RemoveIpRoutes method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RemoveIpRoutesRequest method. 
+// req, resp := client.RemoveIpRoutesRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *DirectoryService) RemoveIpRoutesRequest(input *RemoveIpRoutesInput) (req *request.Request, output *RemoveIpRoutesOutput) { + op := &request.Operation{ + Name: opRemoveIpRoutes, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveIpRoutesInput{} + } + + req = c.newRequest(op, input, output) + output = &RemoveIpRoutesOutput{} + req.Data = output + return +} + +// Removes IP address blocks from a directory. +func (c *DirectoryService) RemoveIpRoutes(input *RemoveIpRoutesInput) (*RemoveIpRoutesOutput, error) { + req, out := c.RemoveIpRoutesRequest(input) + err := req.Send() + return out, err +} + const opRemoveTagsFromResource = "RemoveTagsFromResource" // RemoveTagsFromResourceRequest generates a "aws/request.Request" representing the @@ -1615,10 +1763,109 @@ func (c *DirectoryService) VerifyTrust(input *VerifyTrustInput) (*VerifyTrustOut return out, err } +type AddIpRoutesInput struct { + _ struct{} `type:"structure"` + + // Identifier (ID) of the directory to which to add the address block. + DirectoryId *string `type:"string" required:"true"` + + // IP address blocks, using CIDR format, of the traffic to route. This is often + // the IP address block of the DNS server used for your on-premises domain. + IpRoutes []*IpRoute `type:"list" required:"true"` + + // If set to true, updates the inbound and outbound rules of the security group + // that has the description: "AWS created security group for directory ID directory + // controllers." Following are the new rules: + // + // Inbound: + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 88, Source: 0.0.0.0/0 + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 123, Source: 0.0.0.0/0 + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 138, Source: 0.0.0.0/0 + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 389, Source: 0.0.0.0/0 + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 464, Source: 0.0.0.0/0 + // + // Type: Custom UDP Rule, Protocol: UDP, Range: 445, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 88, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 135, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 445, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 464, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 636, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 1024-65535, Source: 0.0.0.0/0 + // + // Type: Custom TCP Rule, Protocol: TCP, Range: 3268-33269, Source: 0.0.0.0/0 + // + // Type: DNS (UDP), Protocol: UDP, Range: 53, Source: 0.0.0.0/0 + // + // Type: DNS (TCP), Protocol: TCP, Range: 53, Source: 0.0.0.0/0 + // + // Type: LDAP, Protocol: TCP, Range: 389, Source: 0.0.0.0/0 + // + // Type: All ICMP, Protocol: All, Range: N/A, Source: 0.0.0.0/0 + // + // Outbound: + // + // Type: All traffic, Protocol: All, Range: All, Destination: 0.0.0.0/0 + // + // These security rules impact an internal network interface that is not + // exposed publicly. 
+ UpdateSecurityGroupForDirectoryControllers *bool `type:"boolean"` +} + +// String returns the string representation +func (s AddIpRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddIpRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddIpRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AddIpRoutesInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + if s.IpRoutes == nil { + invalidParams.Add(request.NewErrParamRequired("IpRoutes")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AddIpRoutesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddIpRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AddIpRoutesOutput) GoString() string { + return s.String() +} + type AddTagsToResourceInput struct { _ struct{} `type:"structure"` - // The ID of the directory to which to add the tag. + // Identifier (ID) for the directory to which to add the tag. ResourceId *string `type:"string" required:"true"` // The tags to be assigned to the Amazon Directory Services directory. @@ -3532,6 +3779,123 @@ func (s GetSnapshotLimitsOutput) GoString() string { return s.String() } +// IP address block. This is often the address block of the DNS server used +// for your on-premises domain. +type IpRoute struct { + _ struct{} `type:"structure"` + + // IP address block using CIDR format, for example 10.0.0.0/24. This is often + // the address block of the DNS server used for your on-premises domain. For + // a single IP address use a CIDR address block with /32. For example 10.0.0.0/32. + CidrIp *string `type:"string"` + + // Description of the address block. + Description *string `type:"string"` +} + +// String returns the string representation +func (s IpRoute) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRoute) GoString() string { + return s.String() +} + +// Information about one or more IP address blocks. +type IpRouteInfo struct { + _ struct{} `type:"structure"` + + // The date and time the address block was added to the directory. + AddedDateTime *time.Time `type:"timestamp" timestampFormat:"unix"` + + // IP address block in the IpRoute. + CidrIp *string `type:"string"` + + // Description of the IpRouteInfo. + Description *string `type:"string"` + + // Identifier (ID) of the directory associated with the IP addresses. + DirectoryId *string `type:"string"` + + // The status of the IP address block. + IpRouteStatusMsg *string `type:"string" enum:"IpRouteStatusMsg"` + + // The reason for the IpRouteStatusMsg. + IpRouteStatusReason *string `type:"string"` +} + +// String returns the string representation +func (s IpRouteInfo) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IpRouteInfo) GoString() string { + return s.String() +} + +type ListIpRoutesInput struct { + _ struct{} `type:"structure"` + + // Identifier (ID) of the directory for which you want to retrieve the IP addresses. + DirectoryId *string `type:"string" required:"true"` + + // Maximum number of items to return. 
If this value is zero, the maximum number + // of items is specified by the limitations of the operation. + Limit *int64 `type:"integer"` + + // The ListIpRoutes.NextToken value from a previous call to ListIpRoutes. Pass + // null if this is the first call. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIpRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIpRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListIpRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListIpRoutesInput"} + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ListIpRoutesOutput struct { + _ struct{} `type:"structure"` + + // A list of IpRoutes. + IpRoutesInfo []*IpRouteInfo `type:"list"` + + // If not null, more results are available. Pass this value for the NextToken + // parameter in a subsequent call to ListIpRoutes to retrieve the next set of + // items. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s ListIpRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListIpRoutesOutput) GoString() string { + return s.String() +} + type ListTagsForResourceInput struct { _ struct{} `type:"structure"` @@ -3541,7 +3905,7 @@ type ListTagsForResourceInput struct { // Reserved for future use. NextToken *string `type:"string"` - // The ID of the directory for which you want to retrieve tags. + // Identifier (ID) of the directory for which you want to retrieve tags. ResourceId *string `type:"string" required:"true"` } @@ -3711,10 +4075,60 @@ func (s RegisterEventTopicOutput) GoString() string { return s.String() } +type RemoveIpRoutesInput struct { + _ struct{} `type:"structure"` + + // IP address blocks that you want to remove. + CidrIps []*string `type:"list" required:"true"` + + // Identifier (ID) of the directory from which you want to remove the IP addresses. + DirectoryId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveIpRoutesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveIpRoutesInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveIpRoutesInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RemoveIpRoutesInput"} + if s.CidrIps == nil { + invalidParams.Add(request.NewErrParamRequired("CidrIps")) + } + if s.DirectoryId == nil { + invalidParams.Add(request.NewErrParamRequired("DirectoryId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RemoveIpRoutesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveIpRoutesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RemoveIpRoutesOutput) GoString() string { + return s.String() +} + type RemoveTagsFromResourceInput struct { _ struct{} `type:"structure"` - // The ID of the directory from which to remove the tag. + // Identifier (ID) of the directory from which to remove the tag. 
ResourceId *string `type:"string" required:"true"` // The tag key (name) of the tag to be removed. @@ -3869,16 +4283,14 @@ func (s SnapshotLimits) GoString() string { type Tag struct { _ struct{} `type:"structure"` - // A key is the required name of the tag. The string value can be from 1 to - // 128 Unicode characters in length and cannot be prefixed with "aws:". The - // string can only contain only the set of Unicode letters, digits, white-space, - // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // Required name of the tag. The string value can be Unicode characters and + // cannot be prefixed with "aws:". The string can contain only the set of Unicode + // letters, digits, white-space, '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). Key *string `min:"1" type:"string" required:"true"` - // A value is the optional value of the tag. The string value can be from 1 - // to 256 Unicode characters in length. The string can only contain only the - // set of Unicode letters, digits, white-space, '_', '.', '/', '=', '+', '-' - // (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). + // The optional value of the tag. The string value can be Unicode characters. + // The string can contain only the set of Unicode letters, digits, white-space, + // '_', '.', '/', '=', '+', '-' (Java regex: "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-]*)$"). Value *string `type:"string" required:"true"` } @@ -4167,6 +4579,21 @@ const ( DirectoryTypeMicrosoftAd = "MicrosoftAD" ) +const ( + // @enum IpRouteStatusMsg + IpRouteStatusMsgAdding = "Adding" + // @enum IpRouteStatusMsg + IpRouteStatusMsgAdded = "Added" + // @enum IpRouteStatusMsg + IpRouteStatusMsgRemoving = "Removing" + // @enum IpRouteStatusMsg + IpRouteStatusMsgRemoved = "Removed" + // @enum IpRouteStatusMsg + IpRouteStatusMsgAddFailed = "AddFailed" + // @enum IpRouteStatusMsg + IpRouteStatusMsgRemoveFailed = "RemoveFailed" +) + const ( // @enum RadiusAuthenticationProtocol RadiusAuthenticationProtocolPap = "PAP" diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 170405fa1..bc903ec09 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -1416,6 +1416,9 @@ func (c *EC2) CopySnapshotRequest(input *CopySnapshotInput) (req *request.Reques // To copy an encrypted snapshot that has been shared from another account, // you must have permissions for the CMK used to encrypt the snapshot. // +// Snapshots created by the CopySnapshot action have an arbitrary volume +// ID that should not be used for any purpose. +// // For more information, see Copying an Amazon EBS Snapshot (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-copy-snapshot.html) // in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) CopySnapshot(input *CopySnapshotInput) (*CopySnapshotOutput, error) { @@ -1546,17 +1549,20 @@ func (c *EC2) CreateDhcpOptionsRequest(input *CreateDhcpOptionsInput) (req *requ // domain-name-servers - The IP addresses of up to four domain name servers, // or AmazonProvidedDNS. The default DHCP option set specifies AmazonProvidedDNS. // If specifying more than one domain name server, specify the IP addresses -// in a single parameter, separated by commas. +// in a single parameter, separated by commas. If you want your instance to +// receive a custom DNS hostname as specified in domain-name, you must set domain-name-servers +// to a custom DNS server. 
// // domain-name - If you're using AmazonProvidedDNS in "us-east-1", specify // "ec2.internal". If you're using AmazonProvidedDNS in another region, specify // "region.compute.internal" (for example, "ap-northeast-1.compute.internal"). -// Otherwise, specify a domain name (for example, "MyCompany.com"). Important: -// Some Linux operating systems accept multiple domain names separated by spaces. -// However, Windows and other Linux operating systems treat the value as a single -// domain, which results in unexpected behavior. If your DHCP options set is -// associated with a VPC that has instances with multiple operating systems, -// specify only one domain name. +// Otherwise, specify a domain name (for example, "MyCompany.com"). This value +// is used to complete unqualified DNS hostnames. Important: Some Linux operating +// systems accept multiple domain names separated by spaces. However, Windows +// and other Linux operating systems treat the value as a single domain, which +// results in unexpected behavior. If your DHCP options set is associated with +// a VPC that has instances with multiple operating systems, specify only one +// domain name. // // ntp-servers - The IP addresses of up to four Network Time Protocol (NTP) // servers. @@ -9439,6 +9445,11 @@ func (c *EC2) ModifyImageAttributeRequest(input *ModifyImageAttributeInput) (req // // AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace // product code cannot be made public. +// +// The SriovNetSupport enhanced networking attribute cannot be changed using +// this command. Instead, enable SriovNetSupport on an instance and create an +// AMI from the instance. This will result in an image with SriovNetSupport +// enabled. func (c *EC2) ModifyImageAttribute(input *ModifyImageAttributeInput) (*ModifyImageAttributeOutput, error) { req, out := c.ModifyImageAttributeRequest(input) err := req.Send() @@ -10060,12 +10071,15 @@ func (c *EC2) ModifyVpcPeeringConnectionOptionsRequest(input *ModifyVpcPeeringCo // Enable/disable communication over the peering connection between instances // in your VPC and an EC2-Classic instance that's linked to the peer VPC. // +// Enable/disable a local VPC to resolve public DNS hostnames to private +// IP addresses when queried from instances in the peer VPC. +// // If the peered VPCs are in different accounts, each owner must initiate -// a separate request to enable or disable communication in either direction, -// depending on whether their VPC was the requester or accepter for the VPC -// peering connection. If the peered VPCs are in the same account, you can modify -// the requester and accepter options in the same request. To confirm which -// VPC is the accepter and requester for a VPC peering connection, use the DescribeVpcPeeringConnections +// a separate request to modify the peering connection options, depending on +// whether their VPC was the requester or accepter for the VPC peering connection. +// If the peered VPCs are in the same account, you can modify the requester +// and accepter options in the same request. To confirm which VPC is the accepter +// and requester for a VPC peering connection, use the DescribeVpcPeeringConnections // command. 
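A hedged sketch of the ModifyVpcPeeringConnectionOptions call described above, exercising the new AllowDnsResolutionFromRemoteVpc option from the requester's side; the peering connection ID and region are placeholder assumptions.

```
// Sketch: as the requester account, let instances in the peer VPC
// resolve this VPC's public DNS hostnames to private IP addresses.
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	svc := ec2.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.ModifyVpcPeeringConnectionOptions(&ec2.ModifyVpcPeeringConnectionOptionsInput{
		VpcPeeringConnectionId: aws.String("pcx-11112222"),
		RequesterPeeringConnectionOptions: &ec2.PeeringConnectionOptionsRequest{
			AllowDnsResolutionFromRemoteVpc: aws.Bool(true),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}
```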
func (c *EC2) ModifyVpcPeeringConnectionOptions(input *ModifyVpcPeeringConnectionOptionsInput) (*ModifyVpcPeeringConnectionOptionsOutput, error) { req, out := c.ModifyVpcPeeringConnectionOptionsRequest(input) @@ -12819,7 +12833,8 @@ type AuthorizeSecurityGroupIngressInput struct { IpPermissions []*IpPermission `locationNameList:"item" type:"list"` // The IP protocol name (tcp, udp, icmp) or number (see Protocol Numbers (http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)). - // (VPC only) Use -1 to specify all. + // (VPC only) Use -1 to specify all traffic. If you specify -1, traffic on all + // ports is allowed, regardless of any ports you specify. IpProtocol *string `type:"string"` // [EC2-Classic, default VPC] The name of the source security group. You can't @@ -26274,16 +26289,6 @@ func (s *ModifyVpcPeeringConnectionOptionsInput) Validate() error { if s.VpcPeeringConnectionId == nil { invalidParams.Add(request.NewErrParamRequired("VpcPeeringConnectionId")) } - if s.AccepterPeeringConnectionOptions != nil { - if err := s.AccepterPeeringConnectionOptions.Validate(); err != nil { - invalidParams.AddNested("AccepterPeeringConnectionOptions", err.(request.ErrInvalidParams)) - } - } - if s.RequesterPeeringConnectionOptions != nil { - if err := s.RequesterPeeringConnectionOptions.Validate(); err != nil { - invalidParams.AddNested("RequesterPeeringConnectionOptions", err.(request.ErrInvalidParams)) - } - } if invalidParams.Len() > 0 { return invalidParams @@ -26884,6 +26889,10 @@ func (s NewDhcpConfiguration) GoString() string { type PeeringConnectionOptions struct { _ struct{} `type:"structure"` + // If true, enables a local VPC to resolve public DNS hostnames to private IP + // addresses when queried from instances in the peer VPC. + AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"` + // If true, enables outbound communication from an EC2-Classic instance that's // linked to a local VPC via ClassicLink to instances in a peer VPC. AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"` @@ -26907,13 +26916,17 @@ func (s PeeringConnectionOptions) GoString() string { type PeeringConnectionOptionsRequest struct { _ struct{} `type:"structure"` + // If true, enables a local VPC to resolve public DNS hostnames to private IP + // addresses when queried from instances in the peer VPC. + AllowDnsResolutionFromRemoteVpc *bool `type:"boolean"` + // If true, enables outbound communication from an EC2-Classic instance that's // linked to a local VPC via ClassicLink to instances in a peer VPC. - AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean" required:"true"` + AllowEgressFromLocalClassicLinkToRemoteVpc *bool `type:"boolean"` // If true, enables outbound communication from instances in a local VPC to // an EC2-Classic instance that's linked to a peer VPC via ClassicLink. - AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean" required:"true"` + AllowEgressFromLocalVpcToRemoteClassicLink *bool `type:"boolean"` } // String returns the string representation @@ -26926,22 +26939,6 @@ func (s PeeringConnectionOptionsRequest) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PeeringConnectionOptionsRequest) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PeeringConnectionOptionsRequest"} - if s.AllowEgressFromLocalClassicLinkToRemoteVpc == nil { - invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalClassicLinkToRemoteVpc")) - } - if s.AllowEgressFromLocalVpcToRemoteClassicLink == nil { - invalidParams.Add(request.NewErrParamRequired("AllowEgressFromLocalVpcToRemoteClassicLink")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - // Describes the placement for the instance. type Placement struct { _ struct{} `type:"structure"` @@ -30351,7 +30348,9 @@ type Snapshot struct { // Any tags assigned to the snapshot. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` - // The ID of the volume that was used to create the snapshot. + // The ID of the volume that was used to create the snapshot. Snapshots created + // by the CopySnapshot action have an arbitrary volume ID that should not be + // used for any purpose. VolumeId *string `locationName:"volumeId" type:"string"` // The size of the volume, in GiB. @@ -32065,6 +32064,10 @@ func (s VpcPeeringConnection) GoString() string { type VpcPeeringConnectionOptionsDescription struct { _ struct{} `type:"structure"` + // Indicates whether a local VPC can resolve public DNS hostnames to private + // IP addresses when queried from instances in a peer VPC. + AllowDnsResolutionFromRemoteVpc *bool `locationName:"allowDnsResolutionFromRemoteVpc" type:"boolean"` + // Indicates whether a local ClassicLink connection can communicate with the // peer VPC over the VPC peering connection. AllowEgressFromLocalClassicLinkToRemoteVpc *bool `locationName:"allowEgressFromLocalClassicLinkToRemoteVpc" type:"boolean"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go index df36f509f..bb46f2a4e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/api.go @@ -653,6 +653,12 @@ type CreateElasticsearchDomainInput struct { // type and number of instances in the domain cluster. ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure"` + // String of format X.Y to specify version for the Elasticsearch domain eg. + // "1.5" or "2.3". For more information, see Creating Elasticsearch Domains + // (http://docs.aws.amazon.com/elasticsearch-service/latest/developerguide/es-createupdatedomains.html#es-createdomains" + // target="_blank) in the Amazon Elasticsearch Service Developer Guide. + ElasticsearchVersion *string `type:"string"` + // Option to set time, in UTC format, of the daily automated snapshot. Default // value is 0 hours. SnapshotOptions *SnapshotOptions `type:"structure"` @@ -1062,6 +1068,9 @@ type ElasticsearchDomainConfig struct { // Specifies the ElasticsearchClusterConfig for the Elasticsearch domain. ElasticsearchClusterConfig *ElasticsearchClusterConfigStatus `type:"structure"` + // String of format X.Y to specify version for the Elasticsearch domain. + ElasticsearchVersion *ElasticsearchVersionStatus `type:"structure"` + // Specifies the SnapshotOptions for the Elasticsearch domain. SnapshotOptions *SnapshotOptionsStatus `type:"structure"` } @@ -1118,6 +1127,8 @@ type ElasticsearchDomainStatus struct { // The type and number of instances in the domain cluster. 
ElasticsearchClusterConfig *ElasticsearchClusterConfig `type:"structure" required:"true"` + ElasticsearchVersion *string `type:"string"` + // The Elasticsearch domain endpoint that you use to submit index and search // requests. Endpoint *string `type:"string"` @@ -1141,6 +1152,29 @@ func (s ElasticsearchDomainStatus) GoString() string { return s.String() } +// Status of the Elasticsearch version options for the specified Elasticsearch +// domain. +type ElasticsearchVersionStatus struct { + _ struct{} `type:"structure"` + + // Specifies the Elasticsearch version for the specified Elasticsearch domain. + Options *string `type:"string" required:"true"` + + // Specifies the status of the Elasticsearch version options for the specified + // Elasticsearch domain. + Status *OptionStatus `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ElasticsearchVersionStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ElasticsearchVersionStatus) GoString() string { + return s.String() +} + type ListDomainNamesInput struct { _ struct{} `type:"structure"` } @@ -1480,6 +1514,16 @@ const ( // @enum ESPartitionInstanceType ESPartitionInstanceTypeM32xlargeElasticsearch = "m3.2xlarge.elasticsearch" // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM4LargeElasticsearch = "m4.large.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM4XlargeElasticsearch = "m4.xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM42xlargeElasticsearch = "m4.2xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM44xlargeElasticsearch = "m4.4xlarge.elasticsearch" + // @enum ESPartitionInstanceType + ESPartitionInstanceTypeM410xlargeElasticsearch = "m4.10xlarge.elasticsearch" + // @enum ESPartitionInstanceType ESPartitionInstanceTypeT2MicroElasticsearch = "t2.micro.elasticsearch" // @enum ESPartitionInstanceType ESPartitionInstanceTypeT2SmallElasticsearch = "t2.small.elasticsearch" diff --git a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go index 60f3971a9..701ba5b45 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elasticsearchservice/service.go @@ -16,7 +16,7 @@ import ( // // The endpoint for configuration service requests is region-specific: es.region.amazonaws.com. // For example, es.us-east-1.amazonaws.com. For a current list of supported -// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#cloudsearch_region" +// regions and endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#elasticsearch-service-regions" // target="_blank). //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. 
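The Elasticsearch Service hunks above add an ElasticsearchVersion field and several m4 instance types. A minimal sketch of a CreateElasticsearchDomain call that uses both, assuming a hypothetical domain name; DomainName, InstanceType, and InstanceCount are standard fields of this API but are not part of the hunks shown here:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elasticsearchservice"
)

func main() {
	svc := elasticsearchservice.New(session.New())

	// Request an Elasticsearch 2.3 domain on one of the newly added m4 types.
	out, err := svc.CreateElasticsearchDomain(&elasticsearchservice.CreateElasticsearchDomainInput{
		DomainName:           aws.String("example-domain"), // hypothetical name
		ElasticsearchVersion: aws.String("2.3"),
		ElasticsearchClusterConfig: &elasticsearchservice.ElasticsearchClusterConfig{
			InstanceType:  aws.String(elasticsearchservice.ESPartitionInstanceTypeM4LargeElasticsearch),
			InstanceCount: aws.Int64(1),
		},
	})
	if err != nil {
		log.Fatalf("CreateElasticsearchDomain failed: %v", err)
	}
	log.Printf("domain status: %s", out)
}
```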
diff --git a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go index e91375dc4..266e9a8ba 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/route53/unmarshal_error.go @@ -69,7 +69,7 @@ func unmarshalInvalidChangeBatchError(r *request.Request, requestBody []byte) { } r.Error = awserr.NewRequestFailure( - awserr.NewBatchError(errorCode, "ChangeBatch errors occured", errors), + awserr.NewBatchError(errorCode, "ChangeBatch errors occurred", errors), r.HTTPResponse.StatusCode, r.RequestID, ) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 5132954f3..553b0e4a3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -1242,7 +1242,7 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req return } -// Deprecated, see the GetBucketReplicationConfiguration operation. +// Returns the replication configuration of a bucket. func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { req, out := c.GetBucketReplicationRequest(input) err := req.Send() diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go index 59e4950b8..ed91c5872 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -23,6 +23,8 @@ func unmarshalError(r *request.Request) { defer r.HTTPResponse.Body.Close() defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + // Bucket exists in a different region, and request needs + // to be made to the correct region. 
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { r.Error = awserr.NewRequestFailure( awserr.New("BucketRegionError", @@ -35,25 +37,29 @@ func unmarshalError(r *request.Request) { return } - if r.HTTPResponse.ContentLength == 0 { - // No body, use status code to generate an awserr.Error - r.Error = awserr.NewRequestFailure( - awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) - return - } + var errCode, errMsg string + // Attempt to parse error from body if it is known resp := &xmlErrorResponse{} err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) + errCode = "SerializationError" + errMsg = "failed to decode S3 XML error response" } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - r.RequestID, - ) + errCode = resp.Code + errMsg = resp.Message } + + // Fallback to status code converted to message if still no error code + if len(errCode) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errCode = strings.Replace(statusText, " ", "", -1) + errMsg = statusText + } + + r.Error = awserr.NewRequestFailure( + awserr.New(errCode, errMsg, nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) } diff --git a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go index bf5b3fb76..dbc7d0c42 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ses/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ses/api.go @@ -1719,6 +1719,8 @@ func (c *SES) SendRawEmailRequest(input *SendRawEmailInput) (req *request.Reques // list. Note that each recipient in a group list counts towards the 50-recipient // limit. // +// Amazon SES overrides any Message-ID and Date headers you provide. +// // For every message that you send, the total number of recipients (To:, // CC: and BCC:) is counted against your sending quota - the maximum number // of emails you can send in a 24-hour period. For information about your sending @@ -4403,7 +4405,7 @@ type RawMessage struct { // The raw data of the message. The client must ensure that the message format // complies with Internet email standards regarding email header fields, MIME - // types, MIME encoding, and base64 encoding (if necessary). + // types, MIME encoding, and base64 encoding. // // The To:, CC:, and BCC: headers in the raw message can contain a group list. // @@ -5305,7 +5307,7 @@ type SendRawEmailInput struct { // MIME content types must be among those supported by Amazon SES. For more // information, go to the Amazon SES Developer Guide (http://docs.aws.amazon.com/ses/latest/DeveloperGuide/mime-types.html). // - // Content must be base64-encoded, if MIME requires it. + // Must be base64-encoded. RawMessage *RawMessage `type:"structure" required:"true"` // This parameter is used only for sending authorization. 
It is the ARN of the diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go index 5e4078ea8..f11e8675f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -215,13 +215,14 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re // returned by the operation have the permissions that are defined in the access // policy of the role that is being assumed. If you pass a policy to this operation, // the temporary security credentials that are returned by the operation have -// the permissions that are allowed by both the access policy of the role that -// is being assumed, and the policy that you pass. This gives you a way to -// further restrict the permissions for the resulting temporary security credentials. -// You cannot use the passed policy to grant permissions that are in excess -// of those allowed by the access policy of the role that is being assumed. -// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, -// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) // in the IAM User Guide. // // Before your application can call AssumeRoleWithSAML, you must configure @@ -743,6 +744,14 @@ type AssumeRoleInput struct { // The duration, in seconds, of the role session. The value can range from 900 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set // to 3600 seconds. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` // A unique identifier that is used by third parties when assuming roles in @@ -757,7 +766,8 @@ type AssumeRoleInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@:\/- + // with no spaces. You can also include underscores or any of the following + // characters: =,.@:\/- ExternalId *string `min:"2" type:"string"` // An IAM policy in JSON format. 
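A minimal sketch of an AssumeRole call exercising the DurationSeconds and ExternalId fields documented above, assuming a hypothetical role ARN and external ID; as the documentation notes, DurationSeconds bounds the returned credentials themselves and is separate from any console SessionDuration:

```
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	svc := sts.New(session.New())

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example"), // hypothetical ARN
		RoleSessionName: aws.String("example-session"),
		ExternalId:      aws.String("example-external-id"), // hypothetical third-party ID
		DurationSeconds: aws.Int64(3600),                    // 900s to 3600s per the docs above
	})
	if err != nil {
		log.Fatalf("AssumeRole failed: %v", err)
	}
	log.Printf("temporary access key: %s", aws.StringValue(out.Credentials.AccessKeyId))
}
```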
@@ -801,7 +811,8 @@ type AssumeRoleInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- RoleSessionName *string `min:"2" type:"string" required:"true"` // The identification number of the MFA device that is associated with the user @@ -812,7 +823,8 @@ type AssumeRoleInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if the trust policy of the role being @@ -918,8 +930,13 @@ type AssumeRoleWithSAMLInput struct { // response's SessionNotOnOrAfter value. The actual expiration time is whichever // value is shorter. // - // The maximum duration for a session is 1 hour, and the minimum duration - // is 15 minutes, even if values outside this range are specified. + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Enabling SAML 2.0 Federated + // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` // An IAM policy in JSON format. @@ -1078,6 +1095,14 @@ type AssumeRoleWithWebIdentityInput struct { // The duration, in seconds, of the role session. The value can range from 900 // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set // to 3600 seconds. + // + // This is separate from the duration of a console session that you might + // request using the returned credentials. The request to the federation endpoint + // for a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. DurationSeconds *int64 `min:"900" type:"integer"` // An IAM policy in JSON format. @@ -1125,7 +1150,8 @@ type AssumeRoleWithWebIdentityInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. 
You can also include underscores or any of the following + // characters: =,.@- RoleSessionName *string `min:"2" type:"string" required:"true"` // The OAuth 2.0 access token or OpenID Connect ID token that is provided by @@ -1432,7 +1458,8 @@ type GetFederationTokenInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- Name *string `min:"2" type:"string" required:"true"` // An IAM policy in JSON format that is passed with the GetFederationToken call @@ -1556,7 +1583,8 @@ type GetSessionTokenInput struct { // // The format for this parameter, as described by its regex pattern, is a string // of characters consisting of upper- and lower-case alphanumeric characters - // with no spaces. You can also include any of the following characters: =,.@- + // with no spaces. You can also include underscores or any of the following + // characters: =,.@- SerialNumber *string `min:"9" type:"string"` // The value provided by the MFA device, if MFA is required. If any policy requires diff --git a/vendor/vendor.json b/vendor/vendor.json index bacde77c5..1859b8d03 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -277,596 +277,598 @@ "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" }, { - "checksumSHA1": "4AOg5/w5X4YYFGRV+V0pLKAhe8c=", + "checksumSHA1": "NuOPMyBrQF/R5cXmLo5zI2kIs7M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "AWg3FBA1NTPdIVZipaQf/rGx38o=", + "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", 
- "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "EiauD48zRlXIFvAENgZ+PXSEnT0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "svFeyM3oQkk0nfQ0pguDjMgV2M4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "46SVikiXo5xuy/CS6mM1XVTUU7w=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": 
"2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "0HzXzMByDLiJSqrMEqbg5URAx0o=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "G1he3uSmd1h8ZRnKOIWuDrWp2zQ=", + "checksumSHA1": "uNmSKXAF8B9HWEciW+iyUwZ99qQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "gHqZ41fSrCEUftkImHKGW+cKxFk=", + "checksumSHA1": "L7xWYwx0jNQnzlYHwBS+1q6DcCI=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "MPzz1x/qt6f2R/JW6aELbm/qT4k=", + "checksumSHA1": "H9TymcQkQnXSXSVfjggiiS4bpzM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "nHHyS4+VgZOV7F3Xu87crArmbds=", + "checksumSHA1": "isoix7lTx4qIq2zI2xFADtti5SI=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": 
"2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "ayzKZc+f+OrjOtE2bz4+lrlKR7c=", + "checksumSHA1": "oUOTWZIpPJiGjc9p/hntdBDvS10=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "ttxyyPnlmMDqX+sY10BwbwwA+jo=", + "checksumSHA1": "Y6Db2GGfGD9LPpcJIPj8vXE8BbQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "LsCIsjbzX2r3n/AhpNJvAC5ueNA=", + "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/signer/v4", "revision": "2cc71659118a868dc7544a7ef0808eb42d487011", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "lD48Br3S98XvKfKID0QiTbBgC1M=", + "checksumSHA1": "j8CUd3jhZ8K+cI8fy785NmqJyzg=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + 
"revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "Td30Frd+lrCLlkMAirUTbjBXq5Q=", + "checksumSHA1": "qoTWohhN8wMZvdMAbwi+B5YhQJ0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "90dec2183a5f5458ee79cbaf4b8e9ab910bc81a6", - "revisionTime": "2016-07-08T00:08:20Z" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "vp/AYdsQnZtoPqtX86VsgmLIx1w=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "4deSd9La3EF2Cmq+tD5rcvhfTGQ=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "eCFTaV9GKqv/UEzwRgFFUaFz098=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "b9W5mR0lazSwYV6Pl8HNslokIpo=", + "checksumSHA1": "G9CmCfw00Bjz0TtJsEnxGE6mv/0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "mWNJKpt18ASs9/RhnIjILcsGlng=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "Q6xeArbCzOunYsn2tFyTA5LN1Cg=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": 
"3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "p5a/DcdUvhTx0PCRR+/CRXk9g6c=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "p9BTPHO+J8OdzK2btdcGGAaTmhk=", + "checksumSHA1": "N8Sgq+xG2vYJdKBikM3yQuIBZfs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "t1fZO+x4OG6e7T8HIi2Yr2wR9D4=", + "checksumSHA1": "BiT1NC5G4H7OeNcI7jzkZUzlpr4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "y+pZPK8hcTDwq1zHuRduWE14flw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "gqlYKqMKCuQ3fzNTyDw6jiG1sCs=", + "checksumSHA1": "Ao/Vq8RYiaW63HasBBPkNg/i7CM=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "IEHq+VLH1fud1oQ4MXj1nqfpgUY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "sHPoLMWXO5tM63ipuxVXduuRypI=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "1vOgFGxLhjNe6BK3RJaV1OqisCs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", 
- "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "rjSScNzMTvEHv7Lk5KcxDpNU5EE=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "RZF1yHtJhAqaMwbeAM/6BdLLavk=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "TAuizMIsvgeuZhmGTYPA7LOXHvY=", + "checksumSHA1": "VAlXnW+WxxWRcCv4xsCoox2kgE0=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "qHuJHGUAuuizD9834MP3gVupfdo=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "1c9xsISLQWKSrORIpdokCCWCe2M=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "bvVmHWxCOk0Cmw333zQ5jutPCZQ=", "comment": "v1.1.15", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "TtIAgZ+evpkKB5bBYCB69k0wZoU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "B1EtgBrv//gYqA+Sp6a/SK2zLO4=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": 
"3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "kXJ9ycLAIj0PFSFbfrA/LR/hIi8=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "2n5/m0ClE4OyQRNdjfLwg+nSY3o=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "/cFX1/Gr6M+r9232gLIV+4np7Po=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "jM0EhAIybh0fyLHxrmVSmG3JLmU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "aLwDFgrPzIBidURxso1ujcr2pDs=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "w0aQAtZ42oGeVOqwwG15OBGoU1s=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "mgImZ/bluUOY9GpQ/oAnscIXwrA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "6ejP+X+O9e6y40GICj9Vcn1MuBY=", + "checksumSHA1": "y6jKUvrpTJxj5uh6OqQ4FujhCHU=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": 
"2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "68YN+UopWOSISIcQQ6zSVbyaDzQ=", + "checksumSHA1": "+608jtc5uRpGqGu5ntpKhfWgwGc=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "X9g/Vdq939ijN2gcumwOyYfHM2U=", + "checksumSHA1": "o+bjuT6ycywUf+vXY9hYK4Z3okE=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "+ic7vevBfganFLENR29pJaEf4Tw=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "checksumSHA1": "oLAlquYlQzgYFS9ochS/iQ9+uXY=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { - "checksumSHA1": "6a2WM0r/rXUxFjxH73jYL88LBSw=", + "checksumSHA1": "nH/itbdeFHpl4ysegdtgww9bFSA=", "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "565027b24171359f23f883d0fc48c228cdde301d", - "revisionTime": "2016-07-21T22:15:38Z", - "version": "v1.2.7", - "versionExact": "v1.2.7" + "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", + "revisionTime": "2016-07-29T00:51:21Z", + "version": "v1.2.10", + "versionExact": "v1.2.10" }, { "path": "github.com/bgentry/speakeasy", From 37557f945272299245967e7aa9d32b970a3e2e3c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 29 Jul 2016 20:20:50 +0100 Subject: [PATCH 0475/1238] provider/aws: Fix up `aws_api_gateway_api_key` import test (#7873) This changes the behaviour of `aws_api_gateway_integration` to set the `passthrough_behaviour` to be computed as this was breaking the import test ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAPIGatewayApiKey_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAPIGatewayApiKey_importBasic -timeout 120m === RUN TestAccAWSAPIGatewayApiKey_importBasic --- PASS: TestAccAWSAPIGatewayApiKey_importBasic (50.19s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 50.210s ``` ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSAPIGatewayIntegration_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSAPIGatewayIntegration_ -timeout 120m === RUN TestAccAWSAPIGatewayIntegration_basic --- PASS: TestAccAWSAPIGatewayIntegration_basic (67.43s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 67.449s ``` --- builtin/providers/aws/resource_aws_api_gateway_integration.go | 1 + 1 file changed, 1 insertion(+) diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration.go b/builtin/providers/aws/resource_aws_api_gateway_integration.go index d82d78e6d..2cb0c9818 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration.go @@ -83,6 +83,7 @@ func resourceAwsApiGatewayIntegration() *schema.Resource { "passthrough_behavior": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ValidateFunc: validateApiGatewayIntegrationPassthroughBehavior, }, }, From a9aaf44a87304d1ab5ae524ea34f09f6bc12624f Mon Sep 17 00:00:00 2001 From: Clint Date: Fri, 29 Jul 2016 15:05:57 -0500 Subject: [PATCH 0476/1238] fix make issues (supersedes #7868) (#7876) * Fixing the make error or invalid data type for errorf and printf * fix make errors --- .../aws/resource_aws_appautoscaling_policy.go | 2 +- builtin/providers/aws/structure_test.go | 10 +++++----- .../template/datasource_template_file_test.go | 4 ++-- plugin/resource_provider_test.go | 4 ++-- terraform/context.go | 2 +- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/builtin/providers/aws/resource_aws_appautoscaling_policy.go b/builtin/providers/aws/resource_aws_appautoscaling_policy.go index 44461e3a7..1ce898781 100644 --- a/builtin/providers/aws/resource_aws_appautoscaling_policy.go +++ b/builtin/providers/aws/resource_aws_appautoscaling_policy.go @@ -28,7 +28,7 @@ func resourceAwsAppautoscalingPolicy() *schema.Resource { // https://github.com/boto/botocore/blob/9f322b1/botocore/data/autoscaling/2011-01-01/service-2.json#L1862-L1873 value := v.(string) if len(value) > 255 { - errors = append(errors, fmt.Errorf("q cannot be longer than 255 characters", k)) + errors = append(errors, fmt.Errorf("%s cannot be longer than 255 characters", k)) } return }, diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index 937411af1..d83e458a4 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -477,7 +477,7 @@ func TestExpandParameters(t *testing.T) { } } -func TestexpandRedshiftParameters(t *testing.T) { +func TestExpandRedshiftParameters(t *testing.T) { expanded := []interface{}{ map[string]interface{}{ "name": "character_set_client", @@ -502,7 +502,7 @@ func TestexpandRedshiftParameters(t *testing.T) { } } -func TestexpandElasticacheParameters(t *testing.T) { +func TestExpandElasticacheParameters(t *testing.T) { expanded := []interface{}{ map[string]interface{}{ "name": "activerehashing", @@ -584,7 +584,7 @@ func TestFlattenParameters(t *testing.T) { } } -func 
TestflattenRedshiftParameters(t *testing.T) { +func TestFlattenRedshiftParameters(t *testing.T) { cases := []struct { Input []*redshift.Parameter Output []map[string]interface{} @@ -613,7 +613,7 @@ func TestflattenRedshiftParameters(t *testing.T) { } } -func TestflattenElasticacheParameters(t *testing.T) { +func TestFlattenElasticacheParameters(t *testing.T) { cases := []struct { Input []*elasticache.Parameter Output []map[string]interface{} @@ -774,7 +774,7 @@ func TestFlattenAttachmentWhenNoInstanceId(t *testing.T) { } } -func TestflattenStepAdjustments(t *testing.T) { +func TestFlattenStepAdjustments(t *testing.T) { expanded := []*autoscaling.StepAdjustment{ &autoscaling.StepAdjustment{ MetricIntervalLowerBound: aws.Float64(1.0), diff --git a/builtin/providers/template/datasource_template_file_test.go b/builtin/providers/template/datasource_template_file_test.go index 5e82382a6..7b13f69e6 100644 --- a/builtin/providers/template/datasource_template_file_test.go +++ b/builtin/providers/template/datasource_template_file_test.go @@ -122,7 +122,7 @@ func TestValidateVarsAttribute(t *testing.T) { func TestTemplateSharedMemoryRace(t *testing.T) { var wg sync.WaitGroup for i := 0; i < 100; i++ { - go func(wg sync.WaitGroup, t *testing.T, i int) { + go func(wg *sync.WaitGroup, t *testing.T, i int) { wg.Add(1) out, err := execute("don't panic!", map[string]interface{}{}) if err != nil { @@ -132,7 +132,7 @@ func TestTemplateSharedMemoryRace(t *testing.T) { t.Fatalf("bad output: %s", out) } wg.Done() - }(wg, t, i) + }(&wg, t, i) } wg.Wait() } diff --git a/plugin/resource_provider_test.go b/plugin/resource_provider_test.go index c20662b62..41997b132 100644 --- a/plugin/resource_provider_test.go +++ b/plugin/resource_provider_test.go @@ -444,8 +444,8 @@ func TestResourceProvider_datasources(t *testing.T) { provider := raw.(terraform.ResourceProvider) expected := []terraform.DataSource{ - {"foo"}, - {"bar"}, + {Name: "foo"}, + {Name: "bar"}, } p.DataSourcesReturn = expected diff --git a/terraform/context.go b/terraform/context.go index fd83d4afe..262b7ce38 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -689,6 +689,6 @@ func parseVariableAsHCL(name string, input interface{}, targetType config.Variab return nil, fmt.Errorf("Cannot parse value for variable %s (%q) as valid HCL. One value must be specified.", name, input) default: - panic(fmt.Errorf("unknown type %s", targetType)) + panic(fmt.Errorf("unknown type %s", targetType.Printable())) } } From 5802f76eaa44d168e4438a5f354811912fa7da8c Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 29 Jul 2016 13:17:48 -0400 Subject: [PATCH 0477/1238] Make all terraform package tests pass under -race This isn't a pretty refactor, but fixes the race issues in this package for now. 
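A minimal sketch of the concurrent access pattern the locked InstanceDiff accessors below are intended to make safe; with direct reads and writes of the Attributes map this pattern fails under -race, whereas the mutex-guarded SetAttribute and CopyAttributes serialize access (the import path and iteration counts are illustrative only):

```
package main

import (
	"fmt"
	"sync"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	d := terraform.NewInstanceDiff()

	var wg sync.WaitGroup
	wg.Add(2)

	// Writer: records attribute diffs as they are computed.
	go func() {
		defer wg.Done()
		for i := 0; i < 100; i++ {
			d.SetAttribute(fmt.Sprintf("attr.%d", i), &terraform.ResourceAttrDiff{New: "x"})
		}
	}()

	// Reader: walks a point-in-time copy instead of the live map.
	go func() {
		defer wg.Done()
		for range d.CopyAttributes() {
		}
	}()

	wg.Wait()
}
```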
Fix race on RawConfig.Config() fix command package races --- command/hook_count.go | 2 +- command/hook_ui.go | 8 +- config/raw_config.go | 2 + terraform/diff.go | 133 +++++++++++++++++++++--- terraform/eval_apply.go | 6 +- terraform/eval_check_prevent_destroy.go | 2 +- terraform/eval_diff.go | 36 +++---- terraform/eval_read_data.go | 8 +- terraform/graph_config_node_resource.go | 2 +- terraform/hook_mock.go | 46 ++++++++ terraform/resource_provisioner_mock.go | 9 ++ terraform/state.go | 2 +- terraform/transform_resource.go | 10 +- 13 files changed, 212 insertions(+), 54 deletions(-) diff --git a/command/hook_count.go b/command/hook_count.go index 3e642b254..150ae438e 100644 --- a/command/hook_count.go +++ b/command/hook_count.go @@ -47,7 +47,7 @@ func (h *CountHook) PreApply( } action := countHookActionChange - if d.Destroy { + if d.GetDestroy() { action = countHookActionRemove } else if s.ID == "" { action = countHookActionAdd diff --git a/command/hook_ui.go b/command/hook_ui.go index 2270a1dea..a41f40c98 100644 --- a/command/hook_ui.go +++ b/command/hook_ui.go @@ -84,8 +84,10 @@ func (h *UiHook) PreApply( // Get all the attributes that are changing, and sort them. Also // determine the longest key so that we can align them all. keyLen := 0 - keys := make([]string, 0, len(d.Attributes)) - for key, _ := range d.Attributes { + + dAttrs := d.CopyAttributes() + keys := make([]string, 0, len(dAttrs)) + for key, _ := range dAttrs { // Skip the ID since we do that specially if key == "id" { continue @@ -100,7 +102,7 @@ func (h *UiHook) PreApply( // Go through and output each attribute for _, attrK := range keys { - attrDiff := d.Attributes[attrK] + attrDiff, _ := d.GetAttribute(attrK) v := attrDiff.New u := attrDiff.Old diff --git a/config/raw_config.go b/config/raw_config.go index 18b9dcaf2..b4cb71450 100644 --- a/config/raw_config.go +++ b/config/raw_config.go @@ -93,6 +93,8 @@ func (r *RawConfig) Value() interface{} { // structure will always successfully decode into its ultimate // structure using something like mapstructure. func (r *RawConfig) Config() map[string]interface{} { + r.lock.Lock() + defer r.lock.Unlock() return r.config } diff --git a/terraform/diff.go b/terraform/diff.go index d5008c6c8..f3e7a092f 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -8,6 +8,7 @@ import ( "regexp" "sort" "strings" + "sync" ) // DiffChangeType is an enum with the kind of changes a diff has planned. @@ -216,9 +217,9 @@ func (d *ModuleDiff) String() string { crud := "UPDATE" switch { - case rdiff.RequiresNew() && (rdiff.Destroy || rdiff.DestroyTainted): + case rdiff.RequiresNew() && (rdiff.GetDestroy() || rdiff.GetDestroyTainted()): crud = "DESTROY/CREATE" - case rdiff.Destroy: + case rdiff.GetDestroy(): crud = "DESTROY" case rdiff.RequiresNew(): crud = "CREATE" @@ -230,8 +231,9 @@ func (d *ModuleDiff) String() string { name)) keyLen := 0 - keys := make([]string, 0, len(rdiff.Attributes)) - for key, _ := range rdiff.Attributes { + rdiffAttrs := rdiff.CopyAttributes() + keys := make([]string, 0, len(rdiffAttrs)) + for key, _ := range rdiffAttrs { if key == "id" { continue } @@ -244,7 +246,7 @@ func (d *ModuleDiff) String() string { sort.Strings(keys) for _, attrK := range keys { - attrDiff := rdiff.Attributes[attrK] + attrDiff, _ := rdiff.GetAttribute(attrK) v := attrDiff.New u := attrDiff.Old @@ -279,6 +281,7 @@ func (d *ModuleDiff) String() string { // InstanceDiff is the diff of a resource from some state to another. 
type InstanceDiff struct { + mu sync.Mutex Attributes map[string]*ResourceAttrDiff Destroy bool DestroyTainted bool @@ -324,6 +327,10 @@ func (d *InstanceDiff) init() { } } +func NewInstanceDiff() *InstanceDiff { + return &InstanceDiff{Attributes: make(map[string]*ResourceAttrDiff)} +} + // ChangeType returns the DiffChangeType represented by the diff // for this single instance. func (d *InstanceDiff) ChangeType() DiffChangeType { @@ -331,11 +338,11 @@ func (d *InstanceDiff) ChangeType() DiffChangeType { return DiffNone } - if d.RequiresNew() && (d.Destroy || d.DestroyTainted) { + if d.RequiresNew() && (d.GetDestroy() || d.GetDestroyTainted()) { return DiffDestroyCreate } - if d.Destroy { + if d.GetDestroy() { return DiffDestroy } @@ -352,6 +359,8 @@ func (d *InstanceDiff) Empty() bool { return true } + d.mu.Lock() + defer d.mu.Unlock() return !d.Destroy && len(d.Attributes) == 0 } @@ -366,6 +375,17 @@ func (d *InstanceDiff) RequiresNew() bool { return false } + d.mu.Lock() + defer d.mu.Unlock() + + return d.requiresNew() +} + +func (d *InstanceDiff) requiresNew() bool { + if d == nil { + return false + } + if d.DestroyTainted { return true } @@ -379,24 +399,103 @@ func (d *InstanceDiff) RequiresNew() bool { return false } +// These methods are properly locked, for use outside other InstanceDiff +// methods but everywhere else within in the terraform package. +// TODO refactor the locking scheme +func (d *InstanceDiff) SetTainted(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.DestroyTainted = b +} + +func (d *InstanceDiff) GetDestroyTainted() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.DestroyTainted +} + +func (d *InstanceDiff) SetDestroy(b bool) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Destroy = b +} + +func (d *InstanceDiff) GetDestroy() bool { + d.mu.Lock() + defer d.mu.Unlock() + + return d.Destroy +} + +func (d *InstanceDiff) SetAttribute(key string, attr *ResourceAttrDiff) { + d.mu.Lock() + defer d.mu.Unlock() + + d.Attributes[key] = attr +} + +func (d *InstanceDiff) DelAttribute(key string) { + d.mu.Lock() + defer d.mu.Unlock() + + delete(d.Attributes, key) +} + +func (d *InstanceDiff) GetAttribute(key string) (*ResourceAttrDiff, bool) { + d.mu.Lock() + defer d.mu.Unlock() + + attr, ok := d.Attributes[key] + return attr, ok +} +func (d *InstanceDiff) GetAttributesLen() int { + d.mu.Lock() + defer d.mu.Unlock() + + return len(d.Attributes) +} + +// Safely copies the Attributes map +func (d *InstanceDiff) CopyAttributes() map[string]*ResourceAttrDiff { + d.mu.Lock() + defer d.mu.Unlock() + + attrs := make(map[string]*ResourceAttrDiff) + for k, v := range d.Attributes { + attrs[k] = v + } + + return attrs +} + // Same checks whether or not two InstanceDiff's are the "same". When // we say "same", it is not necessarily exactly equal. Instead, it is // just checking that the same attributes are changing, a destroy // isn't suddenly happening, etc. 
func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { - if d == nil && d2 == nil { + // we can safely compare the pointers without a lock + switch { + case d == nil && d2 == nil: + return true, "" + case d == nil || d2 == nil: + return false, "one nil" + case d == d2: return true, "" - } else if d == nil || d2 == nil { - return false, "both nil" } - if d.Destroy != d2.Destroy { + d.mu.Lock() + defer d.mu.Unlock() + + if d.Destroy != d2.GetDestroy() { return false, fmt.Sprintf( - "diff: Destroy; old: %t, new: %t", d.Destroy, d2.Destroy) + "diff: Destroy; old: %t, new: %t", d.Destroy, d2.GetDestroy()) } - if d.RequiresNew() != d2.RequiresNew() { + if d.requiresNew() != d2.RequiresNew() { return false, fmt.Sprintf( - "diff RequiresNew; old: %t, new: %t", d.RequiresNew(), d2.RequiresNew()) + "diff RequiresNew; old: %t, new: %t", d.requiresNew(), d2.RequiresNew()) } // Go through the old diff and make sure the new diff has all the @@ -406,7 +505,7 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { for k, _ := range d.Attributes { checkOld[k] = struct{}{} } - for k, _ := range d2.Attributes { + for k, _ := range d2.CopyAttributes() { checkNew[k] = struct{}{} } @@ -431,7 +530,7 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { delete(checkOld, k) delete(checkNew, k) - _, ok := d2.Attributes[k] + _, ok := d2.GetAttribute(k) if !ok { // If there's no new attribute, and the old diff expected the attribute // to be removed, that's just fine. @@ -483,7 +582,7 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) { // Similarly, in a RequiresNew scenario, a list that shows up in the plan // diff can disappear from the apply diff, which is calculated from an // empty state. - if d.RequiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { + if d.requiresNew() && (strings.HasSuffix(k, ".#") || strings.HasSuffix(k, ".%")) { ok = true } diff --git a/terraform/eval_apply.go b/terraform/eval_apply.go index fd687c5a1..5dced0136 100644 --- a/terraform/eval_apply.go +++ b/terraform/eval_apply.go @@ -35,9 +35,9 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { } // Remove any output values from the diff - for k, ad := range diff.Attributes { + for k, ad := range diff.CopyAttributes() { if ad.Type == DiffAttrOutput { - delete(diff.Attributes, k) + diff.DelAttribute(k) } } @@ -49,7 +49,7 @@ func (n *EvalApply) Eval(ctx EvalContext) (interface{}, error) { // Flag if we're creating a new instance if n.CreateNew != nil { - *n.CreateNew = state.ID == "" && !diff.Destroy || diff.RequiresNew() + *n.CreateNew = state.ID == "" && !diff.GetDestroy() || diff.RequiresNew() } { diff --git a/terraform/eval_check_prevent_destroy.go b/terraform/eval_check_prevent_destroy.go index ae9dc4f82..aec0ae134 100644 --- a/terraform/eval_check_prevent_destroy.go +++ b/terraform/eval_check_prevent_destroy.go @@ -22,7 +22,7 @@ func (n *EvalCheckPreventDestroy) Eval(ctx EvalContext) (interface{}, error) { diff := *n.Diff preventDestroy := n.Resource.Lifecycle.PreventDestroy - if diff.Destroy && preventDestroy { + if diff.GetDestroy() && preventDestroy { return nil, fmt.Errorf(preventDestroyErrStr, n.Resource.Id()) } diff --git a/terraform/eval_diff.go b/terraform/eval_diff.go index 4a5027d60..8fe476688 100644 --- a/terraform/eval_diff.go +++ b/terraform/eval_diff.go @@ -28,16 +28,16 @@ func (n *EvalCompareDiff) Eval(ctx EvalContext) (interface{}, error) { two = new(InstanceDiff) two.init() } - oneId := one.Attributes["id"] - twoId := two.Attributes["id"] - 
delete(one.Attributes, "id") - delete(two.Attributes, "id") + oneId, _ := one.GetAttribute("id") + twoId, _ := two.GetAttribute("id") + one.DelAttribute("id") + two.DelAttribute("id") defer func() { if oneId != nil { - one.Attributes["id"] = oneId + one.SetAttribute("id", oneId) } if twoId != nil { - two.Attributes["id"] = twoId + two.SetAttribute("id", twoId) } }() @@ -114,12 +114,12 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { // Preserve the DestroyTainted flag if n.Diff != nil { - diff.DestroyTainted = (*n.Diff).DestroyTainted + diff.SetTainted((*n.Diff).GetDestroyTainted()) } // Require a destroy if there is an ID and it requires new. if diff.RequiresNew() && state != nil && state.ID != "" { - diff.Destroy = true + diff.SetDestroy(true) } // If we're creating a new resource, compute its ID @@ -131,12 +131,12 @@ func (n *EvalDiff) Eval(ctx EvalContext) (interface{}, error) { // Add diff to compute new ID diff.init() - diff.Attributes["id"] = &ResourceAttrDiff{ + diff.SetAttribute("id", &ResourceAttrDiff{ Old: oldID, NewComputed: true, RequiresNew: true, Type: DiffAttrOutput, - } + }) } if err := n.processIgnoreChanges(diff); err != nil { @@ -187,7 +187,7 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { ignorableAttrKeys := make(map[string]bool) for _, ignoredKey := range ignoreChanges { - for k := range diff.Attributes { + for k := range diff.CopyAttributes() { if strings.HasPrefix(k, ignoredKey) { ignorableAttrKeys[k] = true } @@ -200,7 +200,7 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // "". Filtering these out allows us to see if we might be able to // skip this diff altogether. if changeType == DiffDestroyCreate { - for k, v := range diff.Attributes { + for k, v := range diff.CopyAttributes() { if v.Empty() || v.NewComputed { ignorableAttrKeys[k] = true } @@ -210,7 +210,7 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // tweak, we ignore the "id" attribute diff that gets added by EvalDiff, // since that was added in reaction to RequiresNew being true. requiresNewAfterIgnores := false - for k, v := range diff.Attributes { + for k, v := range diff.CopyAttributes() { if k == "id" { continue } @@ -233,15 +233,15 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { // attribute diff and the Destroy boolean field log.Printf("[DEBUG] Removing 'id' diff and setting Destroy to false " + "because after ignore_changes, this diff no longer requires replacement") - delete(diff.Attributes, "id") - diff.Destroy = false + diff.DelAttribute("id") + diff.SetDestroy(false) } // If we didn't hit any of our early exit conditions, we can filter the diff. 
for k := range ignorableAttrKeys { log.Printf("[DEBUG] [EvalIgnoreChanges] %s - Ignoring diff attribute: %s", n.Resource.Id(), k) - delete(diff.Attributes, k) + diff.DelAttribute(k) } return nil @@ -333,8 +333,8 @@ func (n *EvalFilterDiff) Eval(ctx EvalContext) (interface{}, error) { result := new(InstanceDiff) if n.Destroy { - if input.Destroy || input.RequiresNew() { - result.Destroy = true + if input.GetDestroy() || input.RequiresNew() { + result.SetDestroy(true) } } diff --git a/terraform/eval_read_data.go b/terraform/eval_read_data.go index 4fd843123..aeb2ebaef 100644 --- a/terraform/eval_read_data.go +++ b/terraform/eval_read_data.go @@ -30,7 +30,7 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { var diff *InstanceDiff - if n.Previous != nil && *n.Previous != nil && (*n.Previous).Destroy { + if n.Previous != nil && *n.Previous != nil && (*n.Previous).GetDestroy() { // If we're re-diffing for a diff that was already planning to // destroy, then we'll just continue with that plan. diff = &InstanceDiff{Destroy: true} @@ -49,12 +49,12 @@ func (n *EvalReadDataDiff) Eval(ctx EvalContext) (interface{}, error) { // id is always computed, because we're always "creating a new resource" diff.init() - diff.Attributes["id"] = &ResourceAttrDiff{ + diff.SetAttribute("id", &ResourceAttrDiff{ Old: "", NewComputed: true, RequiresNew: true, Type: DiffAttrOutput, - } + }) } err = ctx.Hook(func(h Hook) (HookAction, error) { @@ -97,7 +97,7 @@ func (n *EvalReadDataApply) Eval(ctx EvalContext) (interface{}, error) { // If the diff is for *destroying* this resource then we'll // just drop its state and move on, since data resources don't // support an actual "destroy" action. - if diff != nil && diff.Destroy { + if diff != nil && diff.GetDestroy() { if n.Output != nil { *n.Output = nil } diff --git a/terraform/graph_config_node_resource.go b/terraform/graph_config_node_resource.go index 36e1d9112..1c45289a9 100644 --- a/terraform/graph_config_node_resource.go +++ b/terraform/graph_config_node_resource.go @@ -485,7 +485,7 @@ func (n *graphNodeResourceDestroy) destroyInclude( if d != nil { for k, v := range d.Resources { match := k == prefix || strings.HasPrefix(k, prefix+".") - if match && v.Destroy { + if match && v.GetDestroy() { return true } } diff --git a/terraform/hook_mock.go b/terraform/hook_mock.go index d6c5fcb3f..3797a1e15 100644 --- a/terraform/hook_mock.go +++ b/terraform/hook_mock.go @@ -1,8 +1,12 @@ package terraform +import "sync" + // MockHook is an implementation of Hook that can be used for tests. // It records all of its function calls. 
type MockHook struct { + sync.Mutex + PreApplyCalled bool PreApplyInfo *InstanceInfo PreApplyDiff *InstanceDiff @@ -89,6 +93,9 @@ type MockHook struct { } func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreApplyCalled = true h.PreApplyInfo = n h.PreApplyDiff = d @@ -97,6 +104,9 @@ func (h *MockHook) PreApply(n *InstanceInfo, s *InstanceState, d *InstanceDiff) } func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostApplyCalled = true h.PostApplyInfo = n h.PostApplyState = s @@ -105,6 +115,9 @@ func (h *MockHook) PostApply(n *InstanceInfo, s *InstanceState, e error) (HookAc } func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreDiffCalled = true h.PreDiffInfo = n h.PreDiffState = s @@ -112,6 +125,9 @@ func (h *MockHook) PreDiff(n *InstanceInfo, s *InstanceState) (HookAction, error } func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostDiffCalled = true h.PostDiffInfo = n h.PostDiffDiff = d @@ -119,6 +135,9 @@ func (h *MockHook) PostDiff(n *InstanceInfo, d *InstanceDiff) (HookAction, error } func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreProvisionResourceCalled = true h.PreProvisionResourceInfo = n h.PreProvisionInstanceState = s @@ -126,6 +145,9 @@ func (h *MockHook) PreProvisionResource(n *InstanceInfo, s *InstanceState) (Hook } func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostProvisionResourceCalled = true h.PostProvisionResourceInfo = n h.PostProvisionInstanceState = s @@ -133,6 +155,9 @@ func (h *MockHook) PostProvisionResource(n *InstanceInfo, s *InstanceState) (Hoo } func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreProvisionCalled = true h.PreProvisionInfo = n h.PreProvisionProvisionerId = provId @@ -140,6 +165,9 @@ func (h *MockHook) PreProvision(n *InstanceInfo, provId string) (HookAction, err } func (h *MockHook) PostProvision(n *InstanceInfo, provId string) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostProvisionCalled = true h.PostProvisionInfo = n h.PostProvisionProvisionerId = provId @@ -150,6 +178,9 @@ func (h *MockHook) ProvisionOutput( n *InstanceInfo, provId string, msg string) { + h.Lock() + defer h.Unlock() + h.ProvisionOutputCalled = true h.ProvisionOutputInfo = n h.ProvisionOutputProvisionerId = provId @@ -157,6 +188,9 @@ func (h *MockHook) ProvisionOutput( } func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreRefreshCalled = true h.PreRefreshInfo = n h.PreRefreshState = s @@ -164,6 +198,9 @@ func (h *MockHook) PreRefresh(n *InstanceInfo, s *InstanceState) (HookAction, er } func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostRefreshCalled = true h.PostRefreshInfo = n h.PostRefreshState = s @@ -171,6 +208,9 @@ func (h *MockHook) PostRefresh(n *InstanceInfo, s *InstanceState) (HookAction, e } func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PreImportStateCalled = true h.PreImportStateInfo = info 
h.PreImportStateId = id @@ -178,6 +218,9 @@ func (h *MockHook) PreImportState(info *InstanceInfo, id string) (HookAction, er } func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostImportStateCalled = true h.PostImportStateInfo = info h.PostImportStateState = s @@ -185,6 +228,9 @@ func (h *MockHook) PostImportState(info *InstanceInfo, s []*InstanceState) (Hook } func (h *MockHook) PostStateUpdate(s *State) (HookAction, error) { + h.Lock() + defer h.Unlock() + h.PostStateUpdateCalled = true h.PostStateUpdateState = s return h.PostStateUpdateReturn, h.PostStateUpdateError diff --git a/terraform/resource_provisioner_mock.go b/terraform/resource_provisioner_mock.go index 2ba7220cd..be04e9814 100644 --- a/terraform/resource_provisioner_mock.go +++ b/terraform/resource_provisioner_mock.go @@ -1,8 +1,11 @@ package terraform +import "sync" + // MockResourceProvisioner implements ResourceProvisioner but mocks out all the // calls for testing purposes. type MockResourceProvisioner struct { + sync.Mutex // Anything you want, in case you need to store extra data with the mock. Meta interface{} @@ -21,6 +24,9 @@ type MockResourceProvisioner struct { } func (p *MockResourceProvisioner) Validate(c *ResourceConfig) ([]string, []error) { + p.Lock() + defer p.Unlock() + p.ValidateCalled = true p.ValidateConfig = c if p.ValidateFn != nil { @@ -33,6 +39,9 @@ func (p *MockResourceProvisioner) Apply( output UIOutput, state *InstanceState, c *ResourceConfig) error { + p.Lock() + defer p.Unlock() + p.ApplyCalled = true p.ApplyOutput = output p.ApplyState = state diff --git a/terraform/state.go b/terraform/state.go index a8d3dac89..5d7524c80 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -1331,7 +1331,7 @@ func (s *InstanceState) MergeDiff(d *InstanceDiff) *InstanceState { } } if d != nil { - for k, diff := range d.Attributes { + for k, diff := range d.CopyAttributes() { if diff.NewRemoved { delete(result.Attributes, k) continue diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index 2ab485cde..b877a6051 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -418,11 +418,11 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, return true, EvalEarlyExitError{} } - if diffApply.Destroy && len(diffApply.Attributes) == 0 { + if diffApply.GetDestroy() && diffApply.GetAttributesLen() == 0 { return true, EvalEarlyExitError{} } - diffApply.Destroy = false + diffApply.SetDestroy(false) return true, nil }, Then: EvalNoop{}, @@ -432,7 +432,7 @@ func (n *graphNodeExpandedResource) managedResourceEvalNodes(resource *Resource, If: func(ctx EvalContext) (bool, error) { destroy := false if diffApply != nil { - destroy = diffApply.Destroy || diffApply.RequiresNew() + destroy = diffApply.GetDestroy() || diffApply.RequiresNew() } createBeforeDestroyEnabled = @@ -762,7 +762,7 @@ func (n *graphNodeExpandedResource) dataResourceEvalNodes(resource *Resource, in return true, EvalEarlyExitError{} } - if len(diff.Attributes) == 0 { + if diff.GetAttributesLen() == 0 { return true, EvalEarlyExitError{} } @@ -887,7 +887,7 @@ func (n *graphNodeExpandedResourceDestroy) EvalTree() EvalNode { // If we're not destroying, then compare diffs &EvalIf{ If: func(ctx EvalContext) (bool, error) { - if diffApply != nil && diffApply.Destroy { + if diffApply != nil && diffApply.GetDestroy() { return true, nil } From 1bdc2f1c5feab35d684ae0069d69356e5ded0a06 Mon Sep 17 00:00:00 
2001 From: Paul Stack Date: Fri, 29 Jul 2016 21:12:51 +0100 Subject: [PATCH 0478/1238] provider/aws: `aws_vpn_gateway` should be removed from state when in (#7861) deleted state Fixes #7859 When a VPN Gateway has been manually deleted, we should expect it to be added back to the plan ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSVpnGateway_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSVpnGateway_ -timeout 120m === RUN TestAccAWSVpnGateway_importBasic --- PASS: TestAccAWSVpnGateway_importBasic (247.94s) === RUN TestAccAWSVpnGateway_basic --- PASS: TestAccAWSVpnGateway_basic (409.50s) === RUN TestAccAWSVpnGateway_reattach --- PASS: TestAccAWSVpnGateway_reattach (211.33s) === RUN TestAccAWSVpnGateway_delete --- PASS: TestAccAWSVpnGateway_delete (121.10s) === RUN TestAccAWSVpnGateway_tags --- PASS: TestAccAWSVpnGateway_tags (125.38s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1115.274s ``` --- builtin/providers/aws/resource_aws_vpn_gateway.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_vpn_gateway.go b/builtin/providers/aws/resource_aws_vpn_gateway.go index ae8e67d8f..27f4a45f7 100644 --- a/builtin/providers/aws/resource_aws_vpn_gateway.go +++ b/builtin/providers/aws/resource_aws_vpn_gateway.go @@ -86,7 +86,7 @@ func resourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { return nil } - if len(vpnGateway.VpcAttachments) == 0 || *vpnGateway.VpcAttachments[0].State == "detached" { + if len(vpnGateway.VpcAttachments) == 0 || *vpnGateway.VpcAttachments[0].State == "detached" || *vpnGateway.VpcAttachments[0].State == "deleted" { // Gateway exists but not attached to the VPC d.Set("vpc_id", "") } else { From d6191be608fd8832d52fff884b2ce168df289013 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 29 Jul 2016 21:13:34 +0100 Subject: [PATCH 0479/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 868d97b47..b5a62545f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -250,6 +250,7 @@ BUG FIXES: * provider/aws: Fix issue updating ElasticBeanstalk Environment templates [GH-7811] * provider/aws: Restore Defaults to SQS Queues [GH-7818] * provider/aws: Don't delete Lambda function from state on initial call of the Read func [GH-7829] + * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state [GH-7861] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 0e4e94a86f883b762dc39033cf02bbcadb63dbb3 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Jul 2016 14:13:50 -0500 Subject: [PATCH 0480/1238] core: Fix -module for terraform output command The behaviour whereby outputs for a particular nested module can be output was broken by the changes for lists and maps. This commit restores the previous behaviour by passing the module path into the outputsAsString function. We also add a new test of this since the code path for indivdual output vs all outputs for a module has diverged. 
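For concreteness, a minimal sketch (assuming the state layout from the new TestModuleOutputs fixture below) of the lookup this change enables; all names are taken from the terraform package as it appears in this patch:

```
// Sketch: resolve outputs for a nested module by path instead of
// always reading the root module (the pre-change behaviour).
package main

import (
	"fmt"

	"github.com/hashicorp/terraform/terraform"
)

func main() {
	state := &terraform.State{
		Modules: []*terraform.ModuleState{
			{
				Path: []string{"root"},
				Outputs: map[string]*terraform.OutputState{
					"foo": {Value: "bar", Type: "string"},
				},
			},
			{
				Path: []string{"root", "my_module"},
				Outputs: map[string]*terraform.OutputState{
					"blah": {Value: "tastatur", Type: "string"},
				},
			},
		},
	}

	// Previously: outputsAsString always consulted the root module.
	fmt.Println(state.RootModule().Outputs["foo"].Value) // bar

	// Now: the module path is threaded through, so
	// `terraform output -module=my_module` resolves the right module.
	modPath := []string{"root", "my_module"}
	fmt.Println(state.ModuleByPath(modPath).Outputs["blah"].Value) // tastatur
}
```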
--- command/apply.go | 6 +++--- command/output.go | 2 +- command/output_test.go | 49 ++++++++++++++++++++++++++++++++++++++++++ command/refresh.go | 4 +++- terraform/state.go | 3 +-- 5 files changed, 57 insertions(+), 7 deletions(-) diff --git a/command/apply.go b/command/apply.go index 83ae29a7f..659161aee 100644 --- a/command/apply.go +++ b/command/apply.go @@ -251,7 +251,7 @@ func (c *ApplyCommand) Run(args []string) int { } if !c.Destroy { - if outputs := outputsAsString(state, ctx.Module().Config().Outputs, true); outputs != "" { + if outputs := outputsAsString(state, terraform.RootModulePath, ctx.Module().Config().Outputs, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } } @@ -377,12 +377,12 @@ Options: return strings.TrimSpace(helpText) } -func outputsAsString(state *terraform.State, schema []*config.Output, includeHeader bool) string { +func outputsAsString(state *terraform.State, modPath []string, schema []*config.Output, includeHeader bool) string { if state == nil { return "" } - outputs := state.RootModule().Outputs + outputs := state.ModuleByPath(modPath).Outputs outputBuf := new(bytes.Buffer) if len(outputs) > 0 { schemaMap := make(map[string]*config.Output) diff --git a/command/output.go b/command/output.go index 9054dfb4d..a1cf7bf4e 100644 --- a/command/output.go +++ b/command/output.go @@ -88,7 +88,7 @@ func (c *OutputCommand) Run(args []string) int { c.Ui.Output(string(jsonOutputs)) return 0 } else { - c.Ui.Output(outputsAsString(state, nil, false)) + c.Ui.Output(outputsAsString(state, modPath, nil, false)) return 0 } } diff --git a/command/output_test.go b/command/output_test.go index 1487d41cb..01f5c034e 100644 --- a/command/output_test.go +++ b/command/output_test.go @@ -100,6 +100,55 @@ func TestModuleOutput(t *testing.T) { } } +func TestModuleOutputs(t *testing.T) { + originalState := &terraform.State{ + Modules: []*terraform.ModuleState{ + { + Path: []string{"root"}, + Outputs: map[string]*terraform.OutputState{ + "foo": { + Value: "bar", + Type: "string", + }, + }, + }, + { + Path: []string{"root", "my_module"}, + Outputs: map[string]*terraform.OutputState{ + "blah": { + Value: "tastatur", + Type: "string", + }, + }, + }, + }, + } + + statePath := testStateFile(t, originalState) + + ui := new(cli.MockUi) + c := &OutputCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(testProvider()), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "-module", "my_module", + } + + if code := c.Run(args); code != 0 { + t.Fatalf("bad: \n%s", ui.ErrorWriter.String()) + } + + actual := strings.TrimSpace(ui.OutputWriter.String()) + if actual != "blah = tastatur" { + t.Fatalf("bad: %#v", actual) + } +} + func TestOutput_nestedListAndMap(t *testing.T) { originalState := &terraform.State{ Modules: []*terraform.ModuleState{ diff --git a/command/refresh.go b/command/refresh.go index 0c41bcbe4..0f9206276 100644 --- a/command/refresh.go +++ b/command/refresh.go @@ -5,6 +5,8 @@ import ( "log" "os" "strings" + + "github.com/hashicorp/terraform/terraform" ) // RefreshCommand is a cli.Command implementation that refreshes the state @@ -109,7 +111,7 @@ func (c *RefreshCommand) Run(args []string) int { return 1 } - if outputs := outputsAsString(newState, ctx.Module().Config().Outputs, true); outputs != "" { + if outputs := outputsAsString(newState, terraform.RootModulePath, ctx.Module().Config().Outputs, true); outputs != "" { c.Ui.Output(c.Colorize().Color(outputs)) } diff --git a/terraform/state.go b/terraform/state.go index a8d3dac89..55f977c72 100644 --- 
a/terraform/state.go +++ b/terraform/state.go @@ -14,10 +14,9 @@ import ( "strings" "github.com/hashicorp/go-version" - "github.com/satori/go.uuid" - "github.com/hashicorp/terraform/config" "github.com/mitchellh/copystructure" + "github.com/satori/go.uuid" ) const ( From 24c45fcd5dfe196f3e268d884bc84ad61b1c02ed Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 29 Jul 2016 10:53:13 -0500 Subject: [PATCH 0481/1238] terraform: Filter untargeted variable nodes When targeting, only Addressable untargeted nodes were being removed from the graph. Variable nodes are not directly Addressable, so they were hanging around. This caused problems with module variables that referred to Resource nodes. The Resource node would be filtered out of the graph, but the module Variable node would not, so it would try to interpolate during the graph walk and be unable to find it's referent. This would present itself as strange "cannot find variable" errors for variables that were uninvolved with the currently targeted set of resources. Here, we introduce a new interface that can be implemented by graph nodes to indicate they should be filtered out from targeting even though they are not directly addressable themselves. --- terraform/context_plan_test.go | 37 +++++++++++++++++++ terraform/graph_config_node_variable.go | 8 ++++ .../child/main.tf | 5 +++ .../main.tf | 12 ++++++ terraform/transform_targets.go | 24 ++++++++++-- 5 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 terraform/test-fixtures/plan-targeted-module-untargeted-variable/child/main.tf create mode 100644 terraform/test-fixtures/plan-targeted-module-untargeted-variable/main.tf diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index a3a38cc7f..65ba03566 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -2114,6 +2114,43 @@ module.child: } } +func TestContext2Plan_targetedModuleUntargetedVariable(t *testing.T) { + m := testModule(t, "plan-targeted-module-untargeted-variable") + p := testProvider("aws") + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Targets: []string{"aws_instance.blue", "module.blue_mod"}, + }) + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(plan.String()) + expected := strings.TrimSpace(` +DIFF: + +CREATE: aws_instance.blue + +module.blue_mod: + CREATE: aws_instance.mod + type: "" => "aws_instance" + value: "" => "" + +STATE: + + +`) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + // https://github.com/hashicorp/terraform/issues/4515 func TestContext2Plan_targetedOverTen(t *testing.T) { m := testModule(t, "plan-targeted-over-ten") diff --git a/terraform/graph_config_node_variable.go b/terraform/graph_config_node_variable.go index 6098042ef..ba62eb056 100644 --- a/terraform/graph_config_node_variable.go +++ b/terraform/graph_config_node_variable.go @@ -36,6 +36,14 @@ func (n *GraphNodeConfigVariable) DependableName() []string { return []string{n.Name()} } +// RemoveIfNotTargeted implements RemovableIfNotTargeted. +// When targeting is active, variables that are not targeted should be removed +// from the graph, because otherwise module variables trying to interpolate +// their references can fail when they're missing the referent resource node. 
+func (n *GraphNodeConfigVariable) RemoveIfNotTargeted() bool { + return true +} + func (n *GraphNodeConfigVariable) DependentOn() []string { // If we don't have any value set, we don't depend on anything if n.Value == nil { diff --git a/terraform/test-fixtures/plan-targeted-module-untargeted-variable/child/main.tf b/terraform/test-fixtures/plan-targeted-module-untargeted-variable/child/main.tf new file mode 100644 index 000000000..f7b424b84 --- /dev/null +++ b/terraform/test-fixtures/plan-targeted-module-untargeted-variable/child/main.tf @@ -0,0 +1,5 @@ +variable "id" {} + +resource "aws_instance" "mod" { + value = "${var.id}" +} diff --git a/terraform/test-fixtures/plan-targeted-module-untargeted-variable/main.tf b/terraform/test-fixtures/plan-targeted-module-untargeted-variable/main.tf new file mode 100644 index 000000000..90e44dceb --- /dev/null +++ b/terraform/test-fixtures/plan-targeted-module-untargeted-variable/main.tf @@ -0,0 +1,12 @@ +resource "aws_instance" "blue" { } +resource "aws_instance" "green" { } + +module "blue_mod" { + source = "./child" + id = "${aws_instance.blue.id}" +} + +module "green_mod" { + source = "./child" + id = "${aws_instance.green.id}" +} diff --git a/terraform/transform_targets.go b/terraform/transform_targets.go index db577b361..4e99badda 100644 --- a/terraform/transform_targets.go +++ b/terraform/transform_targets.go @@ -37,11 +37,16 @@ func (t *TargetsTransformer) Transform(g *Graph) error { } for _, v := range g.Vertices() { + removable := false if _, ok := v.(GraphNodeAddressable); ok { - if !targetedNodes.Include(v) { - log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) - g.Remove(v) - } + removable = true + } + if vr, ok := v.(RemovableIfNotTargeted); ok { + removable = vr.RemoveIfNotTargeted() + } + if removable && !targetedNodes.Include(v) { + log.Printf("[DEBUG] Removing %q, filtered by targeting.", dag.VertexName(v)) + g.Remove(v) } } } @@ -110,3 +115,14 @@ func (t *TargetsTransformer) nodeIsTarget( } return false } + +// RemovableIfNotTargeted is a special interface for graph nodes that +// aren't directly addressable, but need to be removed from the graph when they +// are not targeted. (Nodes that are not directly targeted end up in the set of +// targeted nodes because something that _is_ targeted depends on them.) The +// initial use case for this interface is GraphNodeConfigVariable, which was +// having trouble interpolating for module variables in targeted scenarios that +// filtered out the resource node being referenced. 
+type RemovableIfNotTargeted interface { + RemoveIfNotTargeted() bool +} From 074be9ae5658bad51d6fc2c77ae0c0be28783081 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 29 Jul 2016 18:35:54 -0400 Subject: [PATCH 0482/1238] Another race in resource.Retry --- helper/resource/wait.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/helper/resource/wait.go b/helper/resource/wait.go index 77a7a01b0..ca50e292f 100644 --- a/helper/resource/wait.go +++ b/helper/resource/wait.go @@ -20,13 +20,15 @@ func Retry(timeout time.Duration, f RetryFunc) error { MinTimeout: 500 * time.Millisecond, Refresh: func() (interface{}, string, error) { rerr := f() + + resultErrMu.Lock() + defer resultErrMu.Unlock() + if rerr == nil { resultErr = nil return 42, "success", nil } - resultErrMu.Lock() - defer resultErrMu.Unlock() resultErr = rerr.Err if rerr.Retryable { From 2eb00c918459b233c1490474aa92de9bf9839f5c Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Jul 2016 18:26:22 -0500 Subject: [PATCH 0483/1238] build: Fix errors in FreeBSD build Fixes the following error when cross compiling: ``` --> freebsd/amd64 error: exit status 2 Stderr: # github.com/hashicorp/terraform/config/module config/module/inode.go:18: cannot use st.Ino (type uint32) as type uint64 in return argument ``` --- config/module/inode.go | 2 +- config/module/inode_freebsd.go | 19 +++++++++++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 config/module/inode_freebsd.go diff --git a/config/module/inode.go b/config/module/inode.go index a49e8a19d..20a951f06 100644 --- a/config/module/inode.go +++ b/config/module/inode.go @@ -1,4 +1,4 @@ -// +build !windows +// +build !windows !freebsd package module diff --git a/config/module/inode_freebsd.go b/config/module/inode_freebsd.go new file mode 100644 index 000000000..ff658761a --- /dev/null +++ b/config/module/inode_freebsd.go @@ -0,0 +1,19 @@ +package module + +import ( + "fmt" + "os" + "syscall" +) + +// lookup the inode of a file on posix systems +func inode(path string) (uint64, error) { + stat, err := os.Stat(path) + if err != nil { + return 0, err + } + if st, ok := stat.Sys().(*syscall.Stat_t); ok { + return uint64(st.Ino), nil + } + return 0, fmt.Errorf("could not determine file inode") +} From 796ce7bdd766ec2dd559fba24723738f94b54fd0 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Fri, 29 Jul 2016 18:40:31 -0500 Subject: [PATCH 0484/1238] build: Opt-in to building rather than opt-out --- config/module/inode.go | 2 +- config/module/inode_freebsd.go | 2 ++ config/module/inode_windows.go | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) diff --git a/config/module/inode.go b/config/module/inode.go index 20a951f06..b9be7e385 100644 --- a/config/module/inode.go +++ b/config/module/inode.go @@ -1,4 +1,4 @@ -// +build !windows !freebsd +// +build linux darwin openbsd solaris package module diff --git a/config/module/inode_freebsd.go b/config/module/inode_freebsd.go index ff658761a..0d95730d9 100644 --- a/config/module/inode_freebsd.go +++ b/config/module/inode_freebsd.go @@ -1,3 +1,5 @@ +// +build freebsd + package module import ( diff --git a/config/module/inode_windows.go b/config/module/inode_windows.go index 3f5d5e7b1..c0cf45538 100644 --- a/config/module/inode_windows.go +++ b/config/module/inode_windows.go @@ -1,3 +1,5 @@ +// +build windows + package module // no syscall.Stat_t on windows, return 0 for inodes From abfd2c1daf914867b8737ac9419f3bd2ecc7a822 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 29 Jul 2016 
23:40:54 +0000 Subject: [PATCH 0485/1238] v0.7.0-rc4 --- terraform/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/version.go b/terraform/version.go index d753ff8e7..1cf692aa6 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -12,7 +12,7 @@ const Version = "0.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "rc4" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a From 2e384672133d4141fd3d48b0dbb1db61dddd7685 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 30 Jul 2016 00:53:33 +0000 Subject: [PATCH 0486/1238] release: clean up after v0.7.0-rc4 --- terraform/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/terraform/version.go b/terraform/version.go index 1cf692aa6..d753ff8e7 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -12,7 +12,7 @@ const Version = "0.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "rc4" +const VersionPrerelease = "dev" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a From 802f014d607b0a74d2ce7c2e53b3209f15f3bf1c Mon Sep 17 00:00:00 2001 From: Milad Irannejad Date: Sun, 31 Jul 2016 18:44:21 -0400 Subject: [PATCH 0487/1238] Update doc page for aws_elb (#7882) --- website/source/docs/providers/aws/r/elb.html.markdown | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/providers/aws/r/elb.html.markdown b/website/source/docs/providers/aws/r/elb.html.markdown index 18486b528..2504d803f 100644 --- a/website/source/docs/providers/aws/r/elb.html.markdown +++ b/website/source/docs/providers/aws/r/elb.html.markdown @@ -88,13 +88,13 @@ The following arguments are supported: Exactly one of `availability_zones` or `subnets` must be specified: this determines if the ELB exists in a VPC or in EC2-classic. -Access Logs support the following: +Access Logs (`access_logs`) support the following: * `bucket` - (Required) The S3 bucket name to store the logs in. * `bucket_prefix` - (Optional) The S3 bucket prefix. Logs are stored in the root if not configured. * `interval` - (Optional) The publishing interval in minutes. Default: 60 minutes. -Listeners support the following: +Listeners (`listener`) support the following: * `instance_port` - (Required) The port on the instance to route to * `instance_protocol` - (Required) The protocol to use to the instance. Valid @@ -105,7 +105,7 @@ Listeners support the following: * `ssl_certificate_id` - (Optional) The ARN of an SSL certificate you have uploaded to AWS IAM. **Only valid when `lb_protocol` is either HTTPS or SSL** -Health Check supports the following: +Health Check (`health_check`) supports the following: * `healthy_threshold` - (Required) The number of checks before the instance is declared healthy. * `unhealthy_threshold` - (Required) The number of checks before the instance is declared unhealthy. @@ -135,4 +135,4 @@ ELBs can be imported using the `name`, e.g. 
``` $ terraform import aws_elb.bar elb-production-12345 -``` \ No newline at end of file +``` From cc18e4d7caf0c0ee2e29b3a6506d7526fbb4e657 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Sun, 31 Jul 2016 23:46:15 +0100 Subject: [PATCH 0488/1238] provider/azurerm: add traffic manager resources (#7826) * provider/azurerm: vendor arm/trafficmanager package * provider/azurerm: add azurerm_traffic_manager_profile resource * provider/azurerm: add azurerm_traffic_manager_endpoint resource * provider/azurerm: document traffic manager resources * provider/azurerm: use short type argument for traffic manager endpoint The resource now takes the short type for example azureEndpoints instead of the long form Microsoft.Network/TrafficManagerProfiles/azureEndpoints. ``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMTrafficManagerEndpoint -timeout 120m === RUN TestAccAzureRMTrafficManagerEndpoint_basic --- PASS: TestAccAzureRMTrafficManagerEndpoint_basic (179.72s) === RUN TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal --- PASS: TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal (171.36s) === RUN TestAccAzureRMTrafficManagerEndpoint_updateWeight --- PASS: TestAccAzureRMTrafficManagerEndpoint_updateWeight (167.24s) === RUN TestAccAzureRMTrafficManagerEndpoint_updatePriority --- PASS: TestAccAzureRMTrafficManagerEndpoint_updatePriority (192.91s) === RUN TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints --- PASS: TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints (111.18s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 822.534s ``` * provider/azurerm: remove unnecesary dereferences in traffic manager resources ``` TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMTrafficManager -timeout 120m === RUN TestAccAzureRMTrafficManagerEndpoint_basic --- PASS: TestAccAzureRMTrafficManagerEndpoint_basic (176.08s) === RUN TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal --- PASS: TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal (172.28s) === RUN TestAccAzureRMTrafficManagerEndpoint_updateWeight --- PASS: TestAccAzureRMTrafficManagerEndpoint_updateWeight (148.97s) === RUN TestAccAzureRMTrafficManagerEndpoint_updatePriority --- PASS: TestAccAzureRMTrafficManagerEndpoint_updatePriority (101.18s) === RUN TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints --- PASS: TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints (88.33s) === RUN TestAccAzureRMTrafficManagerProfile_weighted --- PASS: TestAccAzureRMTrafficManagerProfile_weighted (80.92s) === RUN TestAccAzureRMTrafficManagerProfile_performance --- PASS: TestAccAzureRMTrafficManagerProfile_performance (82.98s) === RUN TestAccAzureRMTrafficManagerProfile_priority --- PASS: TestAccAzureRMTrafficManagerProfile_priority (81.07s) === RUN TestAccAzureRMTrafficManagerProfile_withTags --- PASS: TestAccAzureRMTrafficManagerProfile_withTags (102.50s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 1034.458s ``` --- builtin/providers/azurerm/config.go | 16 + builtin/providers/azurerm/provider.go | 2 + .../resource_arm_traffic_manager_endpoint.go | 251 ++++++++ ...ource_arm_traffic_manager_endpoint_test.go | 539 ++++++++++++++++++ .../resource_arm_traffic_manager_profile.go | 323 +++++++++++ ...source_arm_traffic_manager_profile_test.go | 303 ++++++++++ .../arm/trafficmanager/client.go | 52 ++ .../arm/trafficmanager/endpoints.go | 312 ++++++++++ .../arm/trafficmanager/models.go | 120 ++++ .../arm/trafficmanager/profiles.go | 481 ++++++++++++++++ 
.../arm/trafficmanager/version.go | 43 ++ vendor/vendor.json | 6 + .../r/traffic_manager_endpoint.html.markdown | 111 ++++ .../r/traffic_manager_profile.html.markdown | 104 ++++ website/source/layouts/azurerm.erb | 8 + 15 files changed, 2671 insertions(+) create mode 100644 builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go create mode 100644 builtin/providers/azurerm/resource_arm_traffic_manager_endpoint_test.go create mode 100644 builtin/providers/azurerm/resource_arm_traffic_manager_profile.go create mode 100644 builtin/providers/azurerm/resource_arm_traffic_manager_profile_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go create mode 100644 website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown create mode 100644 website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index 97387629d..b85166d76 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -12,6 +12,7 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/resources/resources" "github.com/Azure/azure-sdk-for-go/arm/scheduler" "github.com/Azure/azure-sdk-for-go/arm/storage" + "github.com/Azure/azure-sdk-for-go/arm/trafficmanager" mainStorage "github.com/Azure/azure-sdk-for-go/storage" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" @@ -61,6 +62,9 @@ type ArmClient struct { storageUsageClient storage.UsageOperationsClient deploymentsClient resources.DeploymentsClient + + trafficManagerProfilesClient trafficmanager.ProfilesClient + trafficManagerEndpointsClient trafficmanager.EndpointsClient } func withRequestLogging() autorest.SendDecorator { @@ -325,6 +329,18 @@ func (c *Config) getArmClient() (*ArmClient, error) { dc.Sender = autorest.CreateSender(withRequestLogging()) client.deploymentsClient = dc + tmpc := trafficmanager.NewProfilesClient(c.SubscriptionID) + setUserAgent(&tmpc.Client) + tmpc.Authorizer = spt + tmpc.Sender = autorest.CreateSender(withRequestLogging()) + client.trafficManagerProfilesClient = tmpc + + tmec := trafficmanager.NewEndpointsClient(c.SubscriptionID) + setUserAgent(&tmec.Client) + tmec.Authorizer = spt + tmec.Sender = autorest.CreateSender(withRequestLogging()) + client.trafficManagerEndpointsClient = tmec + return &client, nil } diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go index 11f9576e8..dd85e7bfb 100644 --- a/builtin/providers/azurerm/provider.go +++ b/builtin/providers/azurerm/provider.go @@ -63,6 +63,8 @@ func Provider() terraform.ResourceProvider { "azurerm_storage_table": resourceArmStorageTable(), "azurerm_subnet": resourceArmSubnet(), "azurerm_template_deployment": resourceArmTemplateDeployment(), + "azurerm_traffic_manager_endpoint": resourceArmTrafficManagerEndpoint(), + "azurerm_traffic_manager_profile": resourceArmTrafficManagerProfile(), "azurerm_virtual_machine": resourceArmVirtualMachine(), "azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(), "azurerm_virtual_network": resourceArmVirtualNetwork(), 
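The endpoint resource in the next diff accepts the short endpoint type (for example `azureEndpoints`, `externalEndpoints`, `nestedEndpoints`) and expands it to the fully qualified ARM type only when calling the SDK; a minimal sketch of that round trip (the resource code itself is authoritative):

```
// Sketch of the short <-> fully qualified endpoint type mapping used by
// resource_arm_traffic_manager_endpoint.go and its acceptance tests.
package main

import (
	"fmt"
	"path"
)

func main() {
	shortType := "azureEndpoints"

	// Writes to the ARM API use the fully qualified type.
	fullType := fmt.Sprintf("Microsoft.Network/TrafficManagerProfiles/%s", shortType)
	fmt.Println(fullType) // Microsoft.Network/TrafficManagerProfiles/azureEndpoints

	// The acceptance tests recover the short form with path.Base.
	fmt.Println(path.Base(fullType)) // azureEndpoints
}
```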
diff --git a/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go new file mode 100644 index 000000000..922db271f --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go @@ -0,0 +1,251 @@ +package azurerm + +import ( + "fmt" + "log" + "net/http" + + "github.com/Azure/azure-sdk-for-go/arm/trafficmanager" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceArmTrafficManagerEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceArmTrafficManagerEndpointCreate, + Read: resourceArmTrafficManagerEndpointRead, + Update: resourceArmTrafficManagerEndpointCreate, + Delete: resourceArmTrafficManagerEndpointDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateAzureRMTrafficManagerEndpointType, + }, + + "profile_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "target": { + Type: schema.TypeString, + Optional: true, + // when targeting an Azure resource the FQDN of that resource will be set as the target + Computed: true, + }, + + "target_resource_id": { + Type: schema.TypeString, + Optional: true, + }, + + "endpoint_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "weight": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validateAzureRMTrafficManagerEndpointWeight, + }, + + "priority": { + Type: schema.TypeInt, + Optional: true, + Computed: true, + ValidateFunc: validateAzureRMTrafficManagerEndpointPriority, + }, + + "endpoint_location": { + Type: schema.TypeString, + Optional: true, + // when targeting an Azure resource the location of that resource will be set on the endpoint + Computed: true, + StateFunc: azureRMNormalizeLocation, + }, + + "min_child_endpoints": { + Type: schema.TypeInt, + Optional: true, + }, + + "resource_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceArmTrafficManagerEndpointCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerEndpointsClient + + log.Printf("[INFO] preparing arguments for ARM TrafficManager Endpoint creation.") + + name := d.Get("name").(string) + endpointType := d.Get("type").(string) + fullEndpointType := fmt.Sprintf("Microsoft.Network/TrafficManagerProfiles/%s", endpointType) + profileName := d.Get("profile_name").(string) + resGroup := d.Get("resource_group_name").(string) + + params := trafficmanager.Endpoint{ + Name: &name, + Type: &fullEndpointType, + Properties: getArmTrafficManagerEndpointProperties(d), + } + + _, err := client.CreateOrUpdate(resGroup, profileName, endpointType, name, params) + if err != nil { + return err + } + + read, err := client.Get(resGroup, profileName, endpointType, name) + if err != nil { + return err + } + if read.ID == nil { + return fmt.Errorf("Cannot read TrafficManager endpoint %s (resource group %s) ID", name, resGroup) + } + + d.SetId(*read.ID) + + return resourceArmTrafficManagerEndpointRead(d, meta) +} + +func resourceArmTrafficManagerEndpointRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerEndpointsClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil 
{ + return err + } + resGroup := id.ResourceGroup + endpointType := d.Get("type").(string) + profileName := id.Path["trafficManagerProfiles"] + + // endpoint name is keyed by endpoint type in ARM ID + name := id.Path[endpointType] + + resp, err := client.Get(resGroup, profileName, endpointType, name) + if resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error making Read request on TrafficManager Endpoint %s: %s", name, err) + } + endpoint := *resp.Properties + + d.Set("name", resp.Name) + d.Set("endpoint_status", endpoint.EndpointStatus) + d.Set("target_resource_id", endpoint.TargetResourceID) + d.Set("target", endpoint.Target) + d.Set("weight", endpoint.Weight) + d.Set("priority", endpoint.Priority) + d.Set("endpoint_location", endpoint.EndpointLocation) + d.Set("endpoint_monitor_status", endpoint.EndpointMonitorStatus) + d.Set("min_child_endpoints", endpoint.MinChildEndpoints) + + return nil +} + +func resourceArmTrafficManagerEndpointDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerEndpointsClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + endpointType := d.Get("type").(string) + profileName := id.Path["trafficManagerProfiles"] + + // endpoint name is keyed by endpoint type in ARM ID + name := id.Path[endpointType] + + _, err = client.Delete(resGroup, profileName, endpointType, name) + + return err +} + +func getArmTrafficManagerEndpointProperties(d *schema.ResourceData) *trafficmanager.EndpointProperties { + var endpointProps trafficmanager.EndpointProperties + + if targetResID := d.Get("target_resource_id").(string); targetResID != "" { + endpointProps.TargetResourceID = &targetResID + } + + if target := d.Get("target").(string); target != "" { + endpointProps.Target = &target + } + + if status := d.Get("endpoint_status").(string); status != "" { + endpointProps.EndpointStatus = &status + } + + if weight := d.Get("weight").(int); weight != 0 { + w64 := int64(weight) + endpointProps.Weight = &w64 + } + + if priority := d.Get("priority").(int); priority != 0 { + p64 := int64(priority) + endpointProps.Priority = &p64 + } + + if location := d.Get("endpoint_location").(string); location != "" { + endpointProps.EndpointLocation = &location + } + + if minChildEndpoints := d.Get("min_child_endpoints").(int); minChildEndpoints != 0 { + mci64 := int64(minChildEndpoints) + endpointProps.MinChildEndpoints = &mci64 + } + + return &endpointProps +} + +func validateAzureRMTrafficManagerEndpointType(i interface{}, k string) (s []string, errors []error) { + valid := map[string]struct{}{ + "azureEndpoints": struct{}{}, + "externalEndpoints": struct{}{}, + "nestedEndpoints": struct{}{}, + } + + if _, ok := valid[i.(string)]; !ok { + errors = append(errors, fmt.Errorf("endpoint type invalid, got %s", i.(string))) + } + return +} + +func validateAzureRMTrafficManagerEndpointWeight(i interface{}, k string) (s []string, errors []error) { + w := i.(int) + if w < 1 || w > 1000 { + errors = append(errors, fmt.Errorf("endpoint weight must be between 1-1000 inclusive")) + } + return +} + +func validateAzureRMTrafficManagerEndpointPriority(i interface{}, k string) (s []string, errors []error) { + p := i.(int) + if p < 1 || p > 1000 { + errors = append(errors, fmt.Errorf("endpoint priority must be between 1-1000 inclusive")) + } + return +} diff --git a/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint_test.go 
b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint_test.go new file mode 100644 index 000000000..091107521 --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint_test.go @@ -0,0 +1,539 @@ +package azurerm + +import ( + "fmt" + "path" + "testing" + + "github.com/Azure/azure-sdk-for-go/core/http" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAzureRMTrafficManagerEndpoint_basic(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_basic, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testAzure"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testAzure", "endpoint_status", "Enabled"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "endpoint_status", "Enabled"), + ), + }, + }, + }) +} + +func TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_basic, ri, ri, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_basicDisableExternal, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testAzure"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testAzure", "endpoint_status", "Enabled"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "endpoint_status", "Enabled"), + ), + }, + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testAzure"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testAzure", "endpoint_status", "Enabled"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "endpoint_status", "Disabled"), + ), + }, + }, + }) +} + +// Altering weight might be used to ramp up migration traffic +func TestAccAzureRMTrafficManagerEndpoint_updateWeight(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_weight, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_updateWeight, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + 
testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternalNew"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "weight", "50"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternalNew", "weight", "50"), + ), + }, + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternalNew"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "weight", "25"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternalNew", "weight", "75"), + ), + }, + }, + }) +} + +// Altering priority might be used to switch failover/active roles +func TestAccAzureRMTrafficManagerEndpoint_updatePriority(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_priority, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_updatePriority, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternalNew"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "priority", "1"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternalNew", "priority", "2"), + ), + }, + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternal"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.testExternalNew"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternal", "priority", "3"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_endpoint.testExternalNew", "priority", "2"), + ), + }, + }, + }) +} + +func TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_nestedEndpoints, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + { + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.nested"), + testCheckAzureRMTrafficManagerEndpointExists("azurerm_traffic_manager_endpoint.externalChild"), + ), + }, + }, + }) +} + +func testCheckAzureRMTrafficManagerEndpointExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + name := rs.Primary.Attributes["name"] + endpointType := rs.Primary.Attributes["type"] + profileName := 
rs.Primary.Attributes["profile_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Traffic Manager Profile: %s", name) + } + + // Ensure resource group/virtual network combination exists in API + conn := testAccProvider.Meta().(*ArmClient).trafficManagerEndpointsClient + + resp, err := conn.Get(resourceGroup, profileName, path.Base(endpointType), name) + if err != nil { + return fmt.Errorf("Bad: Get on trafficManagerEndpointsClient: %s", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Traffic Manager Endpoint %q (resource group: %q) does not exist", name, resourceGroup) + } + + return nil + } +} + +func testCheckAzureRMTrafficManagerEndpointDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).trafficManagerEndpointsClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_traffic_manager_endpoint" { + continue + } + + name := rs.Primary.Attributes["name"] + endpointType := rs.Primary.Attributes["type"] + profileName := rs.Primary.Attributes["profile_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(resourceGroup, profileName, path.Base(endpointType), name) + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Traffic Manager Endpoint sitll exists:\n%#v", resp.Properties) + } + } + + return nil +} + +var testAccAzureRMTrafficManagerEndpoint_basic = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_public_ip" "test" { + name = "acctestpublicip-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + public_ip_address_allocation = "static" + domain_name_label = "acctestpublicip-%d" +} + +resource "azurerm_traffic_manager_endpoint" "testAzure" { + name = "acctestend-azure%d" + type = "azureEndpoints" + target_resource_id = "${azurerm_public_ip.test.id}" + weight = 3 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + type = "externalEndpoints" + target = "terraform.io" + weight = 3 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_basicDisableExternal = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_public_ip" "test" { + name = "acctestpublicip-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + public_ip_address_allocation = "static" + domain_name_label = 
"acctestpublicip-%d" +} + +resource "azurerm_traffic_manager_endpoint" "testAzure" { + name = "acctestend-azure%d" + type = "azureEndpoints" + target_resource_id = "${azurerm_public_ip.test.id}" + weight = 3 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + endpoint_status = "Disabled" + type = "externalEndpoints" + target = "terraform.io" + weight = 3 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_weight = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + type = "externalEndpoints" + target = "terraform.io" + weight = 50 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternalNew" { + name = "acctestend-external%d-2" + type = "externalEndpoints" + target = "www.terraform.io" + weight = 50 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_updateWeight = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + type = "externalEndpoints" + target = "terraform.io" + weight = 25 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternalNew" { + name = "acctestend-external%d-2" + type = "externalEndpoints" + target = "www.terraform.io" + weight = 75 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_priority = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + type = "externalEndpoints" + target = "terraform.io" + priority = 1 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = 
"${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternalNew" { + name = "acctestend-external%d-2" + type = "externalEndpoints" + target = "www.terraform.io" + priority = 2 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_updatePriority = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_endpoint" "testExternal" { + name = "acctestend-external%d" + type = "externalEndpoints" + target = "terraform.io" + priority = 3 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} + +resource "azurerm_traffic_manager_endpoint" "testExternalNew" { + name = "acctestend-external%d-2" + type = "externalEndpoints" + target = "www.terraform.io" + priority = 2 + profile_name = "${azurerm_traffic_manager_profile.test.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` + +var testAccAzureRMTrafficManagerEndpoint_nestedEndpoints = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "parent" { + name = "acctesttmpparent%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctestparent%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_profile" "child" { + name = "acctesttmpchild%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmpchild%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} + +resource "azurerm_traffic_manager_endpoint" "nested" { + name = "acctestend-parent%d" + type = "nestedEndpoints" + target_resource_id = "${azurerm_traffic_manager_profile.child.id}" + priority = 1 + profile_name = "${azurerm_traffic_manager_profile.parent.name}" + resource_group_name = "${azurerm_resource_group.test.name}" + min_child_endpoints = 1 +} + +resource "azurerm_traffic_manager_endpoint" "externalChild" { + name = "acctestend-child%d" + type = "externalEndpoints" + target = "terraform.io" + priority = 1 + profile_name = "${azurerm_traffic_manager_profile.child.name}" + resource_group_name = "${azurerm_resource_group.test.name}" +} +` diff --git a/builtin/providers/azurerm/resource_arm_traffic_manager_profile.go b/builtin/providers/azurerm/resource_arm_traffic_manager_profile.go new file mode 100644 index 000000000..53013631f --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_traffic_manager_profile.go @@ -0,0 +1,323 @@ +package azurerm + +import ( + "bytes" + "fmt" + "log" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/trafficmanager" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceArmTrafficManagerProfile() *schema.Resource { + return &schema.Resource{ + Create: 
resourceArmTrafficManagerProfileCreate, + Read: resourceArmTrafficManagerProfileRead, + Update: resourceArmTrafficManagerProfileCreate, + Delete: resourceArmTrafficManagerProfileDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "profile_status": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateAzureRMTrafficManagerStatus, + }, + + "traffic_routing_method": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAzureRMTrafficManagerRoutingMethod, + }, + + "dns_config": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "relative_name": { + Type: schema.TypeString, + ForceNew: true, + Required: true, + }, + "ttl": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAzureRMTrafficManagerTTL, + }, + }, + }, + Set: resourceAzureRMTrafficManagerDNSConfigHash, + }, + + // inlined from dns_config for ease of use + "fqdn": { + Type: schema.TypeString, + Computed: true, + }, + + "monitor_config": { + Type: schema.TypeSet, + Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "protocol": { + Type: schema.TypeString, + Required: true, + ValidateFunc: validateAzureRMTrafficManagerMonitorProtocol, + }, + "port": { + Type: schema.TypeInt, + Required: true, + ValidateFunc: validateAzureRMTrafficManagerMonitorPort, + }, + "path": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: resourceAzureRMTrafficManagerMonitorConfigHash, + }, + + "resource_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceArmTrafficManagerProfileCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerProfilesClient + + log.Printf("[INFO] preparing arguments for Azure ARM virtual network creation.") + + name := d.Get("name").(string) + // must be provided in request + location := "global" + resGroup := d.Get("resource_group_name").(string) + tags := d.Get("tags").(map[string]interface{}) + + profile := trafficmanager.Profile{ + Name: &name, + Location: &location, + Properties: getArmTrafficManagerProfileProperties(d), + Tags: expandTags(tags), + } + + _, err := client.CreateOrUpdate(resGroup, name, profile) + if err != nil { + return err + } + + read, err := client.Get(resGroup, name) + if err != nil { + return err + } + if read.ID == nil { + return fmt.Errorf("Cannot read TrafficManager profile %s (resource group %s) ID", name, resGroup) + } + + d.SetId(*read.ID) + + return resourceArmTrafficManagerProfileRead(d, meta) +} + +func resourceArmTrafficManagerProfileRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerProfilesClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["trafficManagerProfiles"] + + resp, err := client.Get(resGroup, name) + if resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error making Read request on Traffic Manager Profile %s: %s", name, err) + } + profile := *resp.Properties + + // update appropriate values + d.Set("name", resp.Name) + d.Set("profile_status", profile.ProfileStatus) + d.Set("traffic_routing_method", profile.TrafficRoutingMethod) + + dnsFlat := 
flattenAzureRMTrafficManagerProfileDNSConfig(profile.DNSConfig) + d.Set("dns_config", schema.NewSet(resourceAzureRMTrafficManagerDNSConfigHash, dnsFlat)) + + // fqdn is actually inside DNSConfig, inlined for simpler reference + d.Set("fqdn", profile.DNSConfig.Fqdn) + + monitorFlat := flattenAzureRMTrafficManagerProfileMonitorConfig(profile.MonitorConfig) + d.Set("monitor_config", schema.NewSet(resourceAzureRMTrafficManagerMonitorConfigHash, monitorFlat)) + + flattenAndSetTags(d, resp.Tags) + + return nil +} + +func resourceArmTrafficManagerProfileDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).trafficManagerProfilesClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["trafficManagerProfiles"] + + _, err = client.Delete(resGroup, name) + + return err +} + +func getArmTrafficManagerProfileProperties(d *schema.ResourceData) *trafficmanager.ProfileProperties { + routingMethod := d.Get("traffic_routing_method").(string) + props := &trafficmanager.ProfileProperties{ + TrafficRoutingMethod: &routingMethod, + DNSConfig: expandArmTrafficManagerDNSConfig(d), + MonitorConfig: expandArmTrafficManagerMonitorConfig(d), + } + + if status, ok := d.GetOk("profile_status"); ok { + s := status.(string) + props.ProfileStatus = &s + } + + return props +} + +func expandArmTrafficManagerMonitorConfig(d *schema.ResourceData) *trafficmanager.MonitorConfig { + monitorSets := d.Get("monitor_config").(*schema.Set).List() + monitor := monitorSets[0].(map[string]interface{}) + + proto := monitor["protocol"].(string) + port := int64(monitor["port"].(int)) + path := monitor["path"].(string) + + return &trafficmanager.MonitorConfig{ + Protocol: &proto, + Port: &port, + Path: &path, + } +} + +func expandArmTrafficManagerDNSConfig(d *schema.ResourceData) *trafficmanager.DNSConfig { + dnsSets := d.Get("dns_config").(*schema.Set).List() + dns := dnsSets[0].(map[string]interface{}) + + name := dns["relative_name"].(string) + ttl := int64(dns["ttl"].(int)) + + return &trafficmanager.DNSConfig{ + RelativeName: &name, + TTL: &ttl, + } +} + +func flattenAzureRMTrafficManagerProfileDNSConfig(dns *trafficmanager.DNSConfig) []interface{} { + result := make(map[string]interface{}) + + result["relative_name"] = *dns.RelativeName + result["ttl"] = int(*dns.TTL) + + return []interface{}{result} +} + +func flattenAzureRMTrafficManagerProfileMonitorConfig(cfg *trafficmanager.MonitorConfig) []interface{} { + result := make(map[string]interface{}) + + result["protocol"] = *cfg.Protocol + result["port"] = int(*cfg.Port) + result["path"] = *cfg.Path + + return []interface{}{result} +} + +func resourceAzureRMTrafficManagerDNSConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", m["relative_name"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["ttl"].(int))) + + return hashcode.String(buf.String()) +} + +func resourceAzureRMTrafficManagerMonitorConfigHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%s-", strings.ToLower(m["protocol"].(string)))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["path"].(string))) + + return hashcode.String(buf.String()) +} + +func validateAzureRMTrafficManagerStatus(i interface{}, k string) (s []string, errors []error) { + status := strings.ToLower(i.(string)) + if status != "enabled" && status != "disabled" 
{ + errors = append(errors, fmt.Errorf("%s must be one of: Enabled, Disabled", k)) + } + return +} + +func validateAzureRMTrafficManagerRoutingMethod(i interface{}, k string) (s []string, errors []error) { + valid := map[string]struct{}{ + "Performance": struct{}{}, + "Weighted": struct{}{}, + "Priority": struct{}{}, + } + + if _, ok := valid[i.(string)]; !ok { + errors = append(errors, fmt.Errorf("traffic_routing_method must be one of (Performance, Weighted, Priority), got %s", i.(string))) + } + return +} + +func validateAzureRMTrafficManagerTTL(i interface{}, k string) (s []string, errors []error) { + ttl := i.(int) + if ttl < 30 || ttl > 999999 { + errors = append(errors, fmt.Errorf("ttl must be between 30 and 999,999 inclusive")) + } + return +} + +func validateAzureRMTrafficManagerMonitorProtocol(i interface{}, k string) (s []string, errors []error) { + p := i.(string) + if p != "http" && p != "https" { + errors = append(errors, fmt.Errorf("monitor_config.protocol must be one of: http, https")) + } + return +} + +func validateAzureRMTrafficManagerMonitorPort(i interface{}, k string) (s []string, errors []error) { + p := i.(int) + if p < 1 || p > 65535 { + errors = append(errors, fmt.Errorf("monitor_config.port must be between 1 - 65535 inclusive")) + } + return +} diff --git a/builtin/providers/azurerm/resource_arm_traffic_manager_profile_test.go b/builtin/providers/azurerm/resource_arm_traffic_manager_profile_test.go new file mode 100644 index 000000000..ef4b37479 --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_traffic_manager_profile_test.go @@ -0,0 +1,303 @@ +package azurerm + +import ( + "fmt" + "log" + "testing" + + "github.com/Azure/azure-sdk-for-go/core/http" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAzureRMTrafficManagerProfile_weighted(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_weighted, ri, ri, ri) + + fqdn := fmt.Sprintf("acctesttmp%d.trafficmanager.net", ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists("azurerm_traffic_manager_profile.test"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "traffic_routing_method", "Weighted"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "fqdn", fqdn), + ), + }, + }, + }) +} + +func TestAccAzureRMTrafficManagerProfile_performance(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_performance, ri, ri, ri) + + fqdn := fmt.Sprintf("acctesttmp%d.trafficmanager.net", ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists("azurerm_traffic_manager_profile.test"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "traffic_routing_method", "Performance"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "fqdn", fqdn), + ), + }, + }, + }) +} + +func 
TestAccAzureRMTrafficManagerProfile_priority(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_priority, ri, ri, ri) + + fqdn := fmt.Sprintf("acctesttmp%d.trafficmanager.net", ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists("azurerm_traffic_manager_profile.test"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "traffic_routing_method", "Priority"), + resource.TestCheckResourceAttr("azurerm_traffic_manager_profile.test", "fqdn", fqdn), + ), + }, + }, + }) +} + +func TestAccAzureRMTrafficManagerProfile_withTags(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_withTags, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_withTagsUpdated, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists("azurerm_traffic_manager_profile.test"), + resource.TestCheckResourceAttr( + "azurerm_traffic_manager_profile.test", "tags.%", "2"), + resource.TestCheckResourceAttr( + "azurerm_traffic_manager_profile.test", "tags.environment", "Production"), + resource.TestCheckResourceAttr( + "azurerm_traffic_manager_profile.test", "tags.cost_center", "MSFT"), + ), + }, + + resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMTrafficManagerProfileExists("azurerm_traffic_manager_profile.test"), + resource.TestCheckResourceAttr( + "azurerm_traffic_manager_profile.test", "tags.%", "1"), + resource.TestCheckResourceAttr( + "azurerm_traffic_manager_profile.test", "tags.environment", "staging"), + ), + }, + }, + }) +} + +func testCheckAzureRMTrafficManagerProfileExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + name := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for Traffic Manager Profile: %s", name) + } + + // Ensure resource group/virtual network combination exists in API + conn := testAccProvider.Meta().(*ArmClient).trafficManagerProfilesClient + + resp, err := conn.Get(resourceGroup, name) + if err != nil { + return fmt.Errorf("Bad: Get on trafficManagerProfilesClient: %s", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Traffic Manager %q (resource group: %q) does not exist", name, resourceGroup) + } + + return nil + } +} + +func testCheckAzureRMTrafficManagerProfileDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).trafficManagerProfilesClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_traffic_manager_profile" { + continue + } + + log.Printf("[TRACE] test_profile %#v", rs) + + name := 
rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(resourceGroup, name) + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Traffic Manager profile sitll exists:\n%#v", resp.Properties) + } + } + + return nil +} + +var testAccAzureRMTrafficManagerProfile_weighted = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} +` + +var testAccAzureRMTrafficManagerProfile_performance = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Performance" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} +` + +var testAccAzureRMTrafficManagerProfile_priority = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } +} +` + +var testAccAzureRMTrafficManagerProfile_withTags = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } + + tags { + environment = "Production" + cost_center = "MSFT" + } +} +` + +var testAccAzureRMTrafficManagerProfile_withTagsUpdated = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_traffic_manager_profile" "test" { + name = "acctesttmp%d" + resource_group_name = "${azurerm_resource_group.test.name}" + traffic_routing_method = "Priority" + + dns_config { + relative_name = "acctesttmp%d" + ttl = 30 + } + + monitor_config { + protocol = "https" + port = 443 + path = "/" + } + + tags { + environment = "staging" + } +} +` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go new file mode 100644 index 000000000..9a65c768c --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go @@ -0,0 +1,52 @@ +// Package trafficmanager implements the Azure ARM Trafficmanager service API +// version 2015-11-01. +// +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Trafficmanager + APIVersion = "2015-11-01" + + // DefaultBaseURI is the default URI used for the service Trafficmanager + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Trafficmanager. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: DefaultBaseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go new file mode 100644 index 000000000..f8fc7d1b6 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go @@ -0,0 +1,312 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// EndpointsClient is the client for the Endpoints methods of the +// Trafficmanager service. +type EndpointsClient struct { + ManagementClient +} + +// NewEndpointsClient creates an instance of the EndpointsClient client. +func NewEndpointsClient(subscriptionID string) EndpointsClient { + return EndpointsClient{New(subscriptionID)} +} + +// CreateOrUpdate create or update a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be created or updated. profileName is the name of the +// Traffic Manager profile. endpointType is the type of the Traffic Manager +// endpoint to be created or updated. endpointName is the name of the Traffic +// Manager endpoint to be created or updated. parameters is the Traffic +// Manager endpoint parameters supplied to the CreateOrUpdate operation. 
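
As a minimal sketch of how the CreateOrUpdate call described above is used, the snippet below registers an external endpoint against an existing profile. The subscription ID, resource group, and names are placeholders, the authorizer setup is assumed to happen elsewhere, and the pointer helpers are local to the example:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func strPtr(s string) *string { return &s }
    func i64Ptr(i int64) *int64   { return &i }

    func main() {
        client := trafficmanager.NewEndpointsClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        endpoint := trafficmanager.Endpoint{
            Name: strPtr("example-external"),
            Properties: &trafficmanager.EndpointProperties{
                Target: strPtr("example.com"),
                Weight: i64Ptr(50), // the provider validates weights in 1-1000
            },
        }

        result, err := client.CreateOrUpdate("example-rg", "example-profile", "externalEndpoints", "example-external", endpoint)
        if err != nil {
            log.Fatalf("CreateOrUpdate failed: %v", err)
        }
        fmt.Printf("endpoint ID: %s\n", *result.ID)
    }
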
+func (client EndpointsClient) CreateOrUpdate(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (result Endpoint, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, profileName, endpointType, endpointName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client EndpointsClient) CreateOrUpdatePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client EndpointsClient) CreateOrUpdateResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be deleted. profileName is the name of the Traffic +// Manager profile. endpointType is the type of the Traffic Manager endpoint +// to be deleted. endpointName is the name of the Traffic Manager endpoint to +// be deleted. 
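
A corresponding sketch for the Delete call described above, again with placeholder identifiers and authorization assumed to be configured elsewhere; DeleteResponder accepts both 200 and 204, so on success the raw status code is all that is left to inspect:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func main() {
        client := trafficmanager.NewEndpointsClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        resp, err := client.Delete("example-rg", "example-profile", "externalEndpoints", "example-external")
        if err != nil {
            log.Fatalf("Delete failed: %v", err)
        }

        // resp is a bare autorest.Response wrapping the *http.Response.
        if resp.Response != nil {
            fmt.Printf("delete returned HTTP %d\n", resp.Response.StatusCode)
        }
    }
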
+func (client EndpointsClient) Delete(resourceGroupName string, profileName string, endpointType string, endpointName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, profileName, endpointType, endpointName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client EndpointsClient) DeletePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client EndpointsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint. profileName is the name of the Traffic Manager profile. +// endpointType is the type of the Traffic Manager endpoint. endpointName is +// the name of the Traffic Manager endpoint. 
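
The Get call described above is what the provider's Read function and the acceptance-test helpers build on. A sketch with placeholder identifiers, checking the 404 case before treating the error as fatal, in the same order the provider does:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func main() {
        client := trafficmanager.NewEndpointsClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        resp, err := client.Get("example-rg", "example-profile", "externalEndpoints", "example-external")

        // A 404 surfaces both as an error and as a populated HTTP response, so the
        // status code is inspected before the error is treated as fatal.
        if resp.Response.Response != nil && resp.StatusCode == http.StatusNotFound {
            fmt.Println("endpoint does not exist")
            return
        }
        if err != nil {
            log.Fatalf("Get failed: %v", err)
        }

        props := *resp.Properties
        fmt.Printf("status=%s weight=%d\n", *props.EndpointStatus, *props.Weight)
    }
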
+func (client EndpointsClient) Get(resourceGroupName string, profileName string, endpointType string, endpointName string) (result Endpoint, err error) { + req, err := client.GetPreparer(resourceGroupName, profileName, endpointType, endpointName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client EndpointsClient) GetPreparer(resourceGroupName string, profileName string, endpointType string, endpointName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client EndpointsClient) GetResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update a Traffic Manager endpoint. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager endpoint to be updated. profileName is the name of the Traffic +// Manager profile. endpointType is the type of the Traffic Manager endpoint +// to be updated. endpointName is the name of the Traffic Manager endpoint to +// be updated. parameters is the Traffic Manager endpoint parameters supplied +// to the Update operation. 
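
The Update call described above goes through PATCH rather than PUT, so a sparse Endpoint carrying only the changed fields expresses a partial update. A sketch with placeholder identifiers that bumps only the endpoint's weight:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func i64Ptr(i int64) *int64 { return &i }

    func main() {
        client := trafficmanager.NewEndpointsClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        // Only Weight is set; the omitempty tags keep every other field out of
        // the PATCH body.
        patch := trafficmanager.Endpoint{
            Properties: &trafficmanager.EndpointProperties{
                Weight: i64Ptr(75),
            },
        }

        result, err := client.Update("example-rg", "example-profile", "externalEndpoints", "example-external", patch)
        if err != nil {
            log.Fatalf("Update failed: %v", err)
        }
        fmt.Printf("new weight: %d\n", *result.Properties.Weight)
    }
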
+func (client EndpointsClient) Update(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (result Endpoint, err error) { + req, err := client.UpdatePreparer(resourceGroupName, profileName, endpointType, endpointName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.EndpointsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client EndpointsClient) UpdatePreparer(resourceGroupName string, profileName string, endpointType string, endpointName string, parameters Endpoint) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "endpointName": autorest.Encode("path", endpointName), + "endpointType": autorest.Encode("path", endpointType), + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}/{endpointType}/{endpointName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client EndpointsClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client EndpointsClient) UpdateResponder(resp *http.Response) (result Endpoint, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go new file mode 100644 index 000000000..349bee2d5 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go @@ -0,0 +1,120 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +// CheckTrafficManagerRelativeDNSNameAvailabilityParameters is parameters +// supplied to check Traffic Manager name operation. +type CheckTrafficManagerRelativeDNSNameAvailabilityParameters struct { + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` +} + +// DNSConfig is class containing DNS settings in a Traffic Manager profile. +type DNSConfig struct { + RelativeName *string `json:"relativeName,omitempty"` + Fqdn *string `json:"fqdn,omitempty"` + TTL *int64 `json:"ttl,omitempty"` +} + +// Endpoint is class respresenting a Traffic Manager endpoint. +type Endpoint struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Properties *EndpointProperties `json:"properties,omitempty"` +} + +// EndpointProperties is class respresenting a Traffic Manager endpoint +// properties. +type EndpointProperties struct { + TargetResourceID *string `json:"targetResourceId,omitempty"` + Target *string `json:"target,omitempty"` + EndpointStatus *string `json:"endpointStatus,omitempty"` + Weight *int64 `json:"weight,omitempty"` + Priority *int64 `json:"priority,omitempty"` + EndpointLocation *string `json:"endpointLocation,omitempty"` + EndpointMonitorStatus *string `json:"endpointMonitorStatus,omitempty"` + MinChildEndpoints *int64 `json:"minChildEndpoints,omitempty"` +} + +// MonitorConfig is class containing endpoint monitoring settings in a Traffic +// Manager profile. +type MonitorConfig struct { + ProfileMonitorStatus *string `json:"profileMonitorStatus,omitempty"` + Protocol *string `json:"protocol,omitempty"` + Port *int64 `json:"port,omitempty"` + Path *string `json:"path,omitempty"` +} + +// NameAvailability is class representing a Traffic Manager Name Availability +// response. +type NameAvailability struct { + autorest.Response `json:"-"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + NameAvailable *bool `json:"nameAvailable,omitempty"` + Reason *string `json:"reason,omitempty"` + Message *string `json:"message,omitempty"` +} + +// Profile is class representing a Traffic Manager profile. +type Profile struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *ProfileProperties `json:"properties,omitempty"` +} + +// ProfileListResult is the list Traffic Manager profiles operation response. +type ProfileListResult struct { + autorest.Response `json:"-"` + Value *[]Profile `json:"value,omitempty"` +} + +// ProfileProperties is class representing the Traffic Manager profile +// properties. 
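
Every field in these models is a pointer so that omitted values stay out of the serialized JSON; the Tags shape (*map[string]*string) in particular benefits from a small conversion helper. A sketch using a hypothetical helper, not something this package provides:

    package main

    import (
        "fmt"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    // toProfileTags converts a plain map into the *map[string]*string shape used
    // by Profile.Tags, copying each value so the stored pointers are stable.
    func toProfileTags(in map[string]string) *map[string]*string {
        out := make(map[string]*string, len(in))
        for k, v := range in {
            v := v
            out[k] = &v
        }
        return &out
    }

    func main() {
        name := "example-profile"
        profile := trafficmanager.Profile{
            Name: &name,
            Tags: toProfileTags(map[string]string{"environment": "staging"}),
        }

        for k, v := range *profile.Tags {
            fmt.Printf("%s=%s\n", k, *v)
        }
    }
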
+type ProfileProperties struct { + ProfileStatus *string `json:"profileStatus,omitempty"` + TrafficRoutingMethod *string `json:"trafficRoutingMethod,omitempty"` + DNSConfig *DNSConfig `json:"dnsConfig,omitempty"` + MonitorConfig *MonitorConfig `json:"monitorConfig,omitempty"` + Endpoints *[]Endpoint `json:"endpoints,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// SubResource is +type SubResource struct { + ID *string `json:"id,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go new file mode 100644 index 000000000..eb25c9df0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go @@ -0,0 +1,481 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// ProfilesClient is the client for the Profiles methods of the Trafficmanager +// service. +type ProfilesClient struct { + ManagementClient +} + +// NewProfilesClient creates an instance of the ProfilesClient client. +func NewProfilesClient(subscriptionID string) ProfilesClient { + return ProfilesClient{New(subscriptionID)} +} + +// CheckTrafficManagerRelativeDNSNameAvailability checks the availability of a +// Traffic Manager Relative DNS name. +// +// parameters is the Traffic Manager name parameters supplied to the +// CheckTrafficManagerNameAvailability operation. 
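
A sketch of calling the availability check described above with placeholder values; the resource type string passed in Type is an assumption about what the service expects, not something this package defines:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func strPtr(s string) *string { return &s }

    func main() {
        client := trafficmanager.NewProfilesClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        params := trafficmanager.CheckTrafficManagerRelativeDNSNameAvailabilityParameters{
            Name: strPtr("example-profile"),
            Type: strPtr("Microsoft.Network/trafficManagerProfiles"), // assumed type string
        }

        availability, err := client.CheckTrafficManagerRelativeDNSNameAvailability(params)
        if err != nil {
            log.Fatalf("availability check failed: %v", err)
        }

        if availability.NameAvailable != nil && *availability.NameAvailable {
            fmt.Println("relative DNS name is free")
        } else if availability.Message != nil {
            fmt.Printf("name unavailable: %s\n", *availability.Message)
        }
    }
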
+func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailability(parameters CheckTrafficManagerRelativeDNSNameAvailabilityParameters) (result NameAvailability, err error) { + req, err := client.CheckTrafficManagerRelativeDNSNameAvailabilityPreparer(parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckTrafficManagerRelativeDNSNameAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", resp, "Failure sending request") + } + + result, err = client.CheckTrafficManagerRelativeDNSNameAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CheckTrafficManagerRelativeDNSNameAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckTrafficManagerRelativeDNSNameAvailabilityPreparer prepares the CheckTrafficManagerRelativeDNSNameAvailability request. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilityPreparer(parameters CheckTrafficManagerRelativeDNSNameAvailabilityParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/providers/Microsoft.Network/checkTrafficManagerNameAvailability", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckTrafficManagerRelativeDNSNameAvailabilitySender sends the CheckTrafficManagerRelativeDNSNameAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckTrafficManagerRelativeDNSNameAvailabilityResponder handles the response to the CheckTrafficManagerRelativeDNSNameAvailability request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CheckTrafficManagerRelativeDNSNameAvailabilityResponder(resp *http.Response) (result NameAvailability, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdate create or update a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +// parameters is the Traffic Manager profile parameters supplied to the +// CreateOrUpdate operation. 
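
A sketch of the CreateOrUpdate call described above, creating a profile and reading it back for its ARM ID, which mirrors the flow the Terraform resource uses before calling d.SetId. Location must be "global", and the identifiers below are placeholders:

    package main

    import (
        "fmt"
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/trafficmanager"
    )

    func strPtr(s string) *string { return &s }
    func i64Ptr(i int64) *int64   { return &i }

    func main() {
        client := trafficmanager.NewProfilesClient("00000000-0000-0000-0000-000000000000")
        // client.Authorizer would be set here, e.g. from a service principal token.

        profile := trafficmanager.Profile{
            Location: strPtr("global"), // Traffic Manager profiles are always "global"
            Properties: &trafficmanager.ProfileProperties{
                TrafficRoutingMethod: strPtr("Priority"),
                DNSConfig: &trafficmanager.DNSConfig{
                    RelativeName: strPtr("example-profile"),
                    TTL:          i64Ptr(30),
                },
                MonitorConfig: &trafficmanager.MonitorConfig{
                    Protocol: strPtr("https"),
                    Port:     i64Ptr(443),
                    Path:     strPtr("/"),
                },
            },
        }

        if _, err := client.CreateOrUpdate("example-rg", "example-profile", profile); err != nil {
            log.Fatalf("CreateOrUpdate failed: %v", err)
        }

        // Read the profile back to obtain its ARM ID.
        created, err := client.Get("example-rg", "example-profile")
        if err != nil {
            log.Fatalf("Get after create failed: %v", err)
        }
        fmt.Printf("profile ID: %s\n", *created.ID)
    }
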
+func (client ProfilesClient) CreateOrUpdate(resourceGroupName string, profileName string, parameters Profile) (result Profile, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, profileName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ProfilesClient) CreateOrUpdatePreparer(resourceGroupName string, profileName string, parameters Profile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ProfilesClient) CreateOrUpdateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile to be deleted. profileName is the name of the Traffic +// Manager profile to be deleted. 
+func (client ProfilesClient) Delete(resourceGroupName string, profileName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, profileName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ProfilesClient) DeletePreparer(resourceGroupName string, profileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ProfilesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +func (client ProfilesClient) Get(resourceGroupName string, profileName string) (result Profile, err error) { + req, err := client.GetPreparer(resourceGroupName, profileName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client ProfilesClient) GetPreparer(resourceGroupName string, profileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ProfilesClient) GetResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAll lists all Traffic Manager profiles within a subscription. +func (client ProfilesClient) ListAll() (result ProfileListResult, err error) { + req, err := client.ListAllPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client ProfilesClient) ListAllPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/trafficmanagerprofiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. 
+func (client ProfilesClient) ListAllResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllInResourceGroup lists all Traffic Manager profiles within a resource +// group. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profiles to be listed. +func (client ProfilesClient) ListAllInResourceGroup(resourceGroupName string) (result ProfileListResult, err error) { + req, err := client.ListAllInResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListAllInResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListAllInResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "ListAllInResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListAllInResourceGroupPreparer prepares the ListAllInResourceGroup request. +func (client ProfilesClient) ListAllInResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllInResourceGroupSender sends the ListAllInResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) ListAllInResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllInResourceGroupResponder handles the response to the ListAllInResourceGroup request. The method always +// closes the http.Response Body. +func (client ProfilesClient) ListAllInResourceGroupResponder(resp *http.Response) (result ProfileListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update a Traffic Manager profile. +// +// resourceGroupName is the name of the resource group containing the Traffic +// Manager profile. profileName is the name of the Traffic Manager profile. +// parameters is the Traffic Manager profile parameters supplied to the +// Update operation. 
+func (client ProfilesClient) Update(resourceGroupName string, profileName string, parameters Profile) (result Profile, err error) { + req, err := client.UpdatePreparer(resourceGroupName, profileName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", nil, "Failure preparing request") + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", resp, "Failure sending request") + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "trafficmanager.ProfilesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ProfilesClient) UpdatePreparer(resourceGroupName string, profileName string, parameters Profile) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "profileName": autorest.Encode("path", profileName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/trafficmanagerprofiles/{profileName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ProfilesClient) UpdateResponder(resp *http.Response) (result Profile, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go new file mode 100644 index 000000000..7b1fb23d0 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go @@ -0,0 +1,43 @@ +package trafficmanager + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "0" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "trafficmanager", "2015-11-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index bacde77c5..daf25824c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -44,6 +44,12 @@ "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", "revisionTime": "2016-06-29T16:19:23Z" }, + { + "checksumSHA1": "2qCsDmxUSe6LzkuC9+rTK9wEPBg=", + "path": "github.com/Azure/azure-sdk-for-go/arm/trafficmanager", + "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", + "revisionTime": "2016-06-29T16:19:23Z" + }, { "checksumSHA1": "Q+0Zz0iylSKMck4JhYc8XR83i8M=", "comment": "v2.1.1-beta-8-gca4d906", diff --git a/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown new file mode 100644 index 000000000..458cb8beb --- /dev/null +++ b/website/source/docs/providers/azurerm/r/traffic_manager_endpoint.html.markdown @@ -0,0 +1,111 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_traffic_manager_endpoint" +sidebar_current: "docs-azurerm-resource-network-traffic-manager-endpoint" +description: |- + Creates a Traffic Manager Endpoint. +--- + +# azurerm\_traffic\_manager\_endpoint + +Creates a Traffic Manager Endpoint. + +## Example Usage + +``` +resource "azurerm_traffic_manager_profile" "test" { + name = "profile1" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "West US" + + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "profile1" + ttl = 100 + } + + monitor_config { + protocol = "http" + port = 80 + path = "/" + } + + tags { + environment = "Production" + } +} + +resource "azurerm_traffic_manager_endpoint" "test" { + name = "profile1" + resource_group_name = "${azurerm_resource_group.test.name}" + profile_name = "${azurerm_traffic_manager_profile.test.name}" + target = "terraform.io" + type = "externalEndpoints" + weight = 100 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the virtual network. Changing this forces a + new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to + create the virtual network. + +* `profile_name` - (Required) The name of the Traffic Manager Profile to attach + create the virtual network. + +* `endpoint_status` - (Optional) The status of the Endpoint, can be set to + either `Enabled` or `Disabled`. Defaults to `Enabled`. + +* `type` - (Required) The Endpoint type, must be one of: + - `azureEndpoints` + - `externalEndpoints` + - `nestedEndpoints` + +* `target` - (Optional) The FQDN DNS name of the target. This argument must be + provided for an endpoint of type `externalEndpoints`, for other types it + will be computed. 
+ +* `target_resource_id` - (Optional) The resource id of an Azure resource to + target. This argument must be provided for an endpoint of type + `azureEndpoints`. + +* `weight` - (Optional) Specifies how much traffic should be distributed to this + endpoint, this must be specified for Profiles using the `Weighted` traffic + routing method. Supports values between 1 and 1000. + +* `priority` - (Optional) Specifies the priority of this Endpoint, this must be + specified for Profiles using the `Priority` traffic routing method. Supports + values between 1 and 1000, with no Endpoints sharing the same value. If + omitted the value will be computed in order of creation. + +* `endpoint_location` - (Optional) Specifies the Azure location of the Endpoint, + this must be specified for Profiles using the `Performance` routing method + if the Endpoint is of either type `nestedEndpoints` or `externalEndpoints`. + For Endpoints of type `azureEndpoints` the value will be taken from the + location of the Azure target resource. + +* `min_child_endpoints` - (Optional) This argument specifies the minimum number + of endpoints that must be ‘online’ in the child profile in order for the + parent profile to direct traffic to any of the endpoints in that child + profile. This argument only applies to Endpoints of type `nestedEndpoints` + and defaults to `1`. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Traffic Manager Endpoint id. + +## Import + +Traffic Manager Endpoints can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_traffic_manager_endpoint.testEndpoints /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/trafficManagerProfiles/mytrafficmanagerprofile1/azureEndpoints/mytrafficmanagerendpoint +``` \ No newline at end of file diff --git a/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown b/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown new file mode 100644 index 000000000..fce1f68de --- /dev/null +++ b/website/source/docs/providers/azurerm/r/traffic_manager_profile.html.markdown @@ -0,0 +1,104 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_traffic_manager_profile" +sidebar_current: "docs-azurerm-resource-network-traffic-manager-profile" +description: |- + Creates a Traffic Manager Profile. +--- + +# azurerm\_traffic\_manager\_profile + +Creates a Traffic Manager Profile to which multiple endpoints can be attached. + +## Example Usage + +``` +resource "azurerm_traffic_manager_profile" "test" { + name = "profile1" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "West US" + + traffic_routing_method = "Weighted" + + dns_config { + relative_name = "profile1" + ttl = 100 + } + + monitor_config { + protocol = "http" + port = 80 + path = "/" + } + + tags { + environment = "Production" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the virtual network. Changing this forces a + new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to + create the virtual network. + +* `profile_status` - (Optional) The status of the profile, can be set to either + `Enabled` or `Disabled`. Defaults to `Enabled`. 
+ +* `traffic_routing_method` - (Required) Specifies the algorithm used to route + traffic, possible values are: + - `Performance`- Traffic is routed via the User's closest Endpoint + - `Weighted` - Traffic is spread across Endpoints proportional to their + `weight` value. + - `Priority` - Traffic is routed to the Endpoint with the lowest + `priority` value. + +* `dns_config` - (Required) This block specifies the DNS configuration of the + Profile, it supports the fields documented below. + +* `monitor_config` - (Required) This block specifies the Endpoint monitoring + configuration for the Profile, it supports the fields documented below. + +* `tags` - (Optional) A mapping of tags to assign to the resource. + +The `dns_config` block supports: + +* `relative_name` - (Required) The relative domain name, this is combined with + the domain name used by Traffic Manager to form the FQDN which is exported + as documented below. Changing this forces a new resource to be created. + +* `ttl` - (Required) The TTL value of the Profile used by Local DNS resolvers + and clients. + +The `monitor_config` block supports: + +* `http` - (Required) The protocol used by the monitoring checks, supported + values are `http` or `https`. + +* `port` - (Required) The port number used by the monitoring checks. + +* `path` - (Required) The path used by the monitoring checks. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Traffic Manager Profile id. +* `fqdn` - The FQDN of the created Profile. + +## Notes + +The Traffic Manager is created with the location `global`. + +## Import + +Traffic Manager Profiles can be imported using the `resource id`, e.g. + +``` +terraform import azurerm_traffic_manager_profile.testProfile /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/trafficManagerProfiles/mytrafficmanagerprofile1 +``` \ No newline at end of file diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb index d2ace25b8..df3bcea46 100644 --- a/website/source/layouts/azurerm.erb +++ b/website/source/layouts/azurerm.erb @@ -114,6 +114,14 @@ azurerm_route + > + azurerm_traffic_manager_profile + + + > + azurerm_traffic_manager_endpoint + + From d85007b55a3b7553b7f855241a392e0596895144 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 1 Aug 2016 08:50:34 +1000 Subject: [PATCH 0489/1238] Update CHANGELOG.md --- CHANGELOG.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5a62545f..2f4319aee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -83,7 +83,6 @@ FEATURES: * **New Resource:** `openstack_lb_monitor_v2` [GH-7012] * **New Resource:** `vsphere_virtual_disk` [GH-6273] * **New Resource:** `github_repository_collaborator` [GH-6861] - * **New Resource:** `azurerm_virtual_machine_scale_set` [GH-6711] * **New Resource:** `datadog_timeboard` [GH-6900] * **New Resource:** `digitalocean_tag` [GH-7500] * **New Resource:** `digitalocean_volume` [GH-7560] @@ -94,6 +93,9 @@ FEATURES: * **New Resource:** `mysql_grant` [GH-7656] * **New Resource:** `mysql_user` [GH-7656] * **New Resource:** `azurerm_storage_table` [GH-7327] + * **New Resource:** `azurerm_virtual_machine_scale_set` [GH-6711] + * **New Resource:** `azurerm_traffic_manager_endpoint` [GH-7826] + * **New Resource:** `azurerm_traffic_manager_profile` [GH-7826] * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] * core: The `lookup` interpolation function can 
now have a default fall-back value specified [GH-6884] * core: The `terraform plan` command no longer persists state. [GH-6811] From 91596b4a7190042c1bdd07172e21f9aef88afc68 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 2 Aug 2016 01:36:17 +1000 Subject: [PATCH 0490/1238] provider/aws: Support Import of `aws_db_security_group` This test overrides the AWS_DEFAULT_REGION parameter as the security groups are created in us-east-1 (due to classic VPC requirements) ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSDBSecurityGroup_importBasic' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSDBSecurityGroup_importBasic -timeout 120m === RUN TestAccAWSDBSecurityGroup_importBasic --- PASS: TestAccAWSDBSecurityGroup_importBasic (49.46s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 49.487s ``` --- .../aws/import_aws_db_security_group_test.go | 33 +++++++++++++++++++ .../aws/resource_aws_db_security_group.go | 3 ++ .../aws/r/db_security_group.html.markdown | 8 +++++ .../aws/r/rds_cluster_instance.html.markdown | 2 +- .../r/rds_cluster_parameter_group.markdown | 2 +- 5 files changed, 46 insertions(+), 2 deletions(-) create mode 100644 builtin/providers/aws/import_aws_db_security_group_test.go diff --git a/builtin/providers/aws/import_aws_db_security_group_test.go b/builtin/providers/aws/import_aws_db_security_group_test.go new file mode 100644 index 000000000..57447c5a5 --- /dev/null +++ b/builtin/providers/aws/import_aws_db_security_group_test.go @@ -0,0 +1,33 @@ +package aws + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSDBSecurityGroup_importBasic(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + + resourceName := "aws_db_security_group.bar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSDBSecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSDBSecurityGroupConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_db_security_group.go b/builtin/providers/aws/resource_aws_db_security_group.go index 0b396f65b..3cb9693ef 100644 --- a/builtin/providers/aws/resource_aws_db_security_group.go +++ b/builtin/providers/aws/resource_aws_db_security_group.go @@ -23,6 +23,9 @@ func resourceAwsDbSecurityGroup() *schema.Resource { Read: resourceAwsDbSecurityGroupRead, Update: resourceAwsDbSecurityGroupUpdate, Delete: resourceAwsDbSecurityGroupDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "arn": &schema.Schema{ diff --git a/website/source/docs/providers/aws/r/db_security_group.html.markdown b/website/source/docs/providers/aws/r/db_security_group.html.markdown index 3faf674c6..feb88142e 100644 --- a/website/source/docs/providers/aws/r/db_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/db_security_group.html.markdown @@ -49,3 +49,11 @@ The following attributes are exported: * `id` - The db security group ID. * `arn` - The arn of the DB security group. + +## Import + +DB Security groups can be imported using the `name`, e.g. 
+ +``` +$ terraform import aws_db_security_group.default aws_rds_sg-1 +``` diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 53df2a3c1..79fca6445 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -95,7 +95,7 @@ this instance is a read replica ## Import -Redshift Cluster Instances can be imported using the `identifier`, e.g. +RDS Cluster Instances can be imported using the `identifier`, e.g. ``` $ terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1 diff --git a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown index 085dd38a6..191c7b2b4 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown @@ -56,7 +56,7 @@ The following attributes are exported: ## Import -Redshift Clusters can be imported using the `name`, e.g. +RDS Cluster Parameter Groups can be imported using the `name`, e.g. ``` $ terraform import aws_rds_cluster_parameter_group.cluster_pg production-pg-1 From 8bb7e619b59153e4734aaf9d4fe9deb21a673f0c Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 2 Aug 2016 02:01:20 +1000 Subject: [PATCH 0491/1238] provider/aws: Support Import of `aws_redshift_security_group` ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSRedshiftSecurityGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSRedshiftSecurityGroup_ -timeout 120m === RUN TestAccAWSRedshiftSecurityGroup_importBasic --- PASS: TestAccAWSRedshiftSecurityGroup_importBasic (30.87s) === RUN TestAccAWSRedshiftSecurityGroup_ingressCidr --- PASS: TestAccAWSRedshiftSecurityGroup_ingressCidr (30.45s) === RUN TestAccAWSRedshiftSecurityGroup_updateIngressCidr --- PASS: TestAccAWSRedshiftSecurityGroup_updateIngressCidr (72.78s) === RUN TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup --- PASS: TestAccAWSRedshiftSecurityGroup_ingressSecurityGroup (49.73s) === RUN TestAccAWSRedshiftSecurityGroup_updateIngressSecurityGroup --- PASS: TestAccAWSRedshiftSecurityGroup_updateIngressSecurityGroup (92.44s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 276.291s ``` --- ...import_aws_redshift_security_group_test.go | 33 +++++++++++++++++++ .../resource_aws_redshift_security_group.go | 3 ++ .../r/redshift_security_group.html.markdown | 7 ++++ 3 files changed, 43 insertions(+) create mode 100644 builtin/providers/aws/import_aws_redshift_security_group_test.go diff --git a/builtin/providers/aws/import_aws_redshift_security_group_test.go b/builtin/providers/aws/import_aws_redshift_security_group_test.go new file mode 100644 index 000000000..1d304b5b6 --- /dev/null +++ b/builtin/providers/aws/import_aws_redshift_security_group_test.go @@ -0,0 +1,33 @@ +package aws + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSRedshiftSecurityGroup_importBasic(t *testing.T) { + oldvar := os.Getenv("AWS_DEFAULT_REGION") + os.Setenv("AWS_DEFAULT_REGION", "us-east-1") + defer os.Setenv("AWS_DEFAULT_REGION", oldvar) + + resourceName := "aws_redshift_security_group.bar" + + resource.Test(t, 
resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRedshiftSecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSRedshiftSecurityGroupConfig_ingressCidr, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_redshift_security_group.go b/builtin/providers/aws/resource_aws_redshift_security_group.go index 81195c164..84de59669 100644 --- a/builtin/providers/aws/resource_aws_redshift_security_group.go +++ b/builtin/providers/aws/resource_aws_redshift_security_group.go @@ -22,6 +22,9 @@ func resourceAwsRedshiftSecurityGroup() *schema.Resource { Read: resourceAwsRedshiftSecurityGroupRead, Update: resourceAwsRedshiftSecurityGroupUpdate, Delete: resourceAwsRedshiftSecurityGroupDelete, + Importer: &schema.ResourceImporter{ + State: resourceAwsRedshiftClusterImport, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ diff --git a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown index 79918c632..d3a83c4ce 100644 --- a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown +++ b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown @@ -43,3 +43,10 @@ The following attributes are exported: * `id` - The Redshift security group ID. +## Import + +Redshift security groups can be imported using the `name`, e.g. + +``` +$ terraform import aws_redshift_security_group.testgroup1 redshift_test_group +``` From c15c0eb0cbac2158d442b31985890d3e9cf6dafa Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 1 Aug 2016 15:06:20 -0400 Subject: [PATCH 0492/1238] Disallow strings as arguments to concat The concat interpolation function now only accepts list arguments. Strings are no longer supported, for concatenation or appending to lists. All arguments must be a list, and single elements can be promoted with the `list` interpolation function. 
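To make the promotion pattern described in this commit message concrete, here is a minimal, hypothetical configuration fragment; the `var.extra_subnet_id` and `aws_subnet.private` names are invented for illustration and are not taken from this changeset:

```
# No longer valid: concat() now rejects a bare string argument
# subnet_ids = "${concat(var.extra_subnet_id, aws_subnet.private.*.id)}"

# Valid: promote the single string to a one-element list with list()
subnet_ids = "${concat(list(var.extra_subnet_id), aws_subnet.private.*.id)}"
```

The same promotion works anywhere a single value needs to be combined with an existing list.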
--- config/interpolate_funcs.go | 4 +--- config/interpolate_funcs_test.go | 25 ++++++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 87e9de778..8470b38e2 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -320,8 +320,6 @@ func interpolationFuncConcat() ast.Function { for _, arg := range args { switch arg := arg.(type) { - case string: - outputList = append(outputList, ast.Variable{Type: ast.TypeString, Value: arg}) case []ast.Variable: for _, v := range arg { switch v.Type { @@ -337,7 +335,7 @@ func interpolationFuncConcat() ast.Function { } default: - return nil, fmt.Errorf("concat() does not support %T", arg) + return nil, fmt.Errorf("concat() does not support type %T", arg) } } diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 11450f668..0d4f38d9c 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -362,17 +362,19 @@ func TestInterpolateFuncConcat(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ // String + list + // no longer supported, now returns an error { `${concat("a", split(",", "b,c"))}`, - []interface{}{"a", "b", "c"}, - false, + nil, + true, }, // List + string + // no longer supported, now returns an error { `${concat(split(",", "a,b"), "c")}`, - []interface{}{"a", "b", "c"}, - false, + nil, + true, }, // Single list @@ -427,6 +429,14 @@ func TestInterpolateFuncConcat(t *testing.T) { false, }, + // multiple strings + // no longer supported, now returns an error + { + `${concat("string1", "string2")}`, + nil, + true, + }, + // mismatched types { `${concat("${var.lists}", "${var.maps}")}`, @@ -1532,15 +1542,16 @@ type testFunctionCase struct { func testFunction(t *testing.T, config testFunctionConfig) { for i, tc := range config.Cases { + fmt.Println("running", i) ast, err := hil.Parse(tc.Input) if err != nil { - t.Fatalf("Case #%d: input: %#v\nerr: %s", i, tc.Input, err) + t.Fatalf("Case #%d: input: %#v\nerr: %v", i, tc.Input, err) } result, err := hil.Eval(ast, langEvalConfig(config.Vars)) - t.Logf("err: %s", err) + t.Logf("err: %v", err) if err != nil != tc.Error { - t.Fatalf("Case #%d:\ninput: %#v\nerr: %s", i, tc.Input, err) + t.Fatalf("Case #%d:\ninput: %#v\nerr: %v", i, tc.Input, err) } if !reflect.DeepEqual(result.Value, tc.Result) { From c9e15221035670f75fd094696ebe704e42eb3aae Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 1 Aug 2016 15:24:18 -0400 Subject: [PATCH 0493/1238] Use HIL to limit concat to ast.TypeList we can remove some type checks in the concat function --- config/interpolate_funcs.go | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 8470b38e2..099369064 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -311,31 +311,25 @@ func interpolationFuncCoalesce() ast.Function { // multiple lists. 
func interpolationFuncConcat() ast.Function { return ast.Function{ - ArgTypes: []ast.Type{ast.TypeAny}, + ArgTypes: []ast.Type{ast.TypeList}, ReturnType: ast.TypeList, Variadic: true, - VariadicType: ast.TypeAny, + VariadicType: ast.TypeList, Callback: func(args []interface{}) (interface{}, error) { var outputList []ast.Variable for _, arg := range args { - switch arg := arg.(type) { - case []ast.Variable: - for _, v := range arg { - switch v.Type { - case ast.TypeString: - outputList = append(outputList, v) - case ast.TypeList: - outputList = append(outputList, v) - case ast.TypeMap: - outputList = append(outputList, v) - default: - return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable()) - } + for _, v := range arg.([]ast.Variable) { + switch v.Type { + case ast.TypeString: + outputList = append(outputList, v) + case ast.TypeList: + outputList = append(outputList, v) + case ast.TypeMap: + outputList = append(outputList, v) + default: + return nil, fmt.Errorf("concat() does not support lists of %s", v.Type.Printable()) } - - default: - return nil, fmt.Errorf("concat() does not support type %T", arg) } } From 8c48fb94a57de5baeb9a876db053b523813b8c02 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Mon, 1 Aug 2016 14:37:43 -0500 Subject: [PATCH 0494/1238] website: v0.7 Upgrade Guide --- website/source/layouts/downloads.erb | 8 ++ .../source/upgrade-guides/0-7.html.markdown | 135 ++++++++++++++++++ .../source/upgrade-guides/index.html.markdown | 13 ++ 3 files changed, 156 insertions(+) create mode 100644 website/source/upgrade-guides/0-7.html.markdown create mode 100644 website/source/upgrade-guides/index.html.markdown diff --git a/website/source/layouts/downloads.erb b/website/source/layouts/downloads.erb index db718c0e2..27b73b0fa 100644 --- a/website/source/layouts/downloads.erb +++ b/website/source/layouts/downloads.erb @@ -5,6 +5,14 @@ > Download Terraform + + > + Upgrade Guides + <% end %> diff --git a/website/source/upgrade-guides/0-7.html.markdown b/website/source/upgrade-guides/0-7.html.markdown new file mode 100644 index 000000000..cf873fab1 --- /dev/null +++ b/website/source/upgrade-guides/0-7.html.markdown @@ -0,0 +1,135 @@ +--- +layout: "downloads" +page_title: "Upgrading to Terraform 0.7" +sidebar_current: "upgrade-guides-0-7" +description: |- + Upgrading to Terraform v0.7 +--- + +# Upgrading to Terraform v0.7 + +Terraform v0.7 is a major release, and thus includes some backwards incompatibilities that you'll need to consider when upgrading. This guide is meant to help with that process. + +The goal of this guide is to cover the most common upgrade concerns and issues that would benefit from more explanation and background. The exhaustive list of changes will always be the [Terraform Changelog](https://github.com/hashicorp/terraform/blob/master/CHANGELOG.md). After reviewing this guide, review the Changelog to check on specific notes about the resources and providers you use. + +## Plugin Binaries + +Before v0.7, Terraform's built-in plugins for providers and provisioners were each distributed as separate binaries. + +``` +terraform # core binary +terraform-provider-* # provider plugins +terraform-provisioner-* # provisioner plugins +``` + +These binaries needed to all be extracted to somewhere on your `$PATH` for Terraform to work. + +As of v0.7, these plugins all ship embedded in a single binary. This means that if you just extract the v0.7 archive into a path, you may still have the old separate binaries in your `$PATH`. 
You'll need to remove them manually. + +For example, if you keep Terraform binaries in `/usr/local/bin` you can clear out the old external binaries like this: + +``` +rm /usr/local/bin/terraform-* +``` + +External plugin binaries continue to work using the same pattern, but due to updates to the RPC protocol, they will need to be recompiled to be compatible with Terraform v0.7. + +## Maps in Displayed Plans + +When displaying a plan, Terraform now distinguishes attributes of type map by using a `%` character for the "length field". + +Here is an example showing a diff that includes both a list and a map: + +``` +somelist.#: "0" => "1" +somelist.0: "" => "someitem" +somemap.%: "0" => "1" +somemap.foo: "" => "bar" +``` + +## Interpolation Changes + +There are a few changes to Terraform's interpolation language that may require updates to your configs. + +### String Concatenation + +The `concat()` interpolation function used to work for both lists and strings. It now only works for lists. + +``` +"${concat(var.foo, "-suffix")}" # => Error! No longer supported. +``` + +Instead, you can use nested interpolation braces for string concatenation. + +``` +"${"${var.foo}-suffix"}" +``` + +### Nested Quotes and Escaping + +Escaped quotes inside of interpolations were supported to retain backwards compatibility with older versions of Terraform that allowed them. + +Now, escaped quotes will no longer work in the interpolation context: + +``` +"${lookup(var.somemap, \"somekey\")}" # => Syntax Error! +``` + +Instead, treat each set of interpolation braces (`${}`) as a new quoting context: + +``` +"${lookup(var.somemap, "somekey")}" +``` + +This allows double quote characters to be expressed properly within strings inside of interpolation expressions: + +``` +"${upper("\"quoted\"")}" # => "QUOTED" +``` + +## Safer `terraform plan` Behavior + +Prior to v0.7, the `terraform plan` command had the potential to write updates to the state if changes were detected during the Refresh step (which happens by default during `plan`). Some configurations have metadata that changes with every read, so Refresh would always result in changes to the state, and therefore a write. + +In collaborative environments with shared remote state, this potential side effect of `plan` would cause unnecessary contention over the state, and potentially even interfere with active `apply` operations if they were happening simultaneously elsewhere. + +Terraform v0.7 addresses this by changing the Refresh process that is run during `terraform plan` to always be an in-memory only refresh. New state information detected during this step will not be persisted to permanent state storage. + +If the `-out` flag is used to produce a Plan File, the updated state information _will_ be encoded into that file, so that the resulting `terraform apply` operation can detect if any changes occurred that might invalidate the plan. + +For most users, this change will not affect your day-to-day usage of Terraform. For users with automation that relies on the old side effect of `plan`, you can use the `terraform refresh` command, which will still persist any changes it discovers. + +## Migrating to Data Sources + +With the addition of [Data Sources](/docs/configuration/data-sources.html), there are several resources that were acting as Data Sources that are now deprecated.
+ + * `atlas_artifact` + * `template_file` + * `template_cloudinit_config` + * `tls_cert_request` + +Migrating to the equivalent Data Source is as simple as changing the `resource` keyword to `data` in your declaration and prepending `data.` to attribute references elsewhere in your config. + +For example, given a config like: + +``` +resource "template_file" "example" { + template = "someconfig" +} +resource "aws_instance" "example" { + user_data = "${template_file.example.rendered}" + # ... +} +``` + +A config using the equivalent Data Source would look like this: + +``` +data "template_file" "example" { + template = "someconfig" +} +resource "aws_instance" "example" { + user_data = "${data.template_file.example.rendered}" + # ... +} +``` diff --git a/website/source/upgrade-guides/index.html.markdown b/website/source/upgrade-guides/index.html.markdown new file mode 100644 index 000000000..f8308dbb4 --- /dev/null +++ b/website/source/upgrade-guides/index.html.markdown @@ -0,0 +1,13 @@ +--- +layout: "downloads" +page_title: "Upgrade Guides" +sidebar_current: "upgrade-guides" +description: |- + Upgrade Guides +--- + +# Upgrade Guides + +Terraform's major releases can include an upgrade guide to help upgrading users +walk through backwards compatibility issues and changes to expect. See the +navigation for the available upgrade guides. From 1af7ee87a27e8ce19e92a5cee14aa7279e434d25 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 1 Aug 2016 17:16:22 -0400 Subject: [PATCH 0495/1238] Silence log output when not verbose Set the default log package output to iotuil.Discard during tests if the `-v` flag isn't set. If we are verbose, then apply the filter according to the TF_LOG env variable. --- command/command_test.go | 16 ++++++++++++++++ config/config_test.go | 19 +++++++++++++++++++ dag/dag_test.go | 19 +++++++++++++++++++ helper/logging/logging.go | 16 ++++++++++++++++ terraform/terraform_test.go | 16 ++++++++++++++++ 5 files changed, 86 insertions(+) diff --git a/command/command_test.go b/command/command_test.go index 14e19ee3b..174d439ac 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -1,7 +1,9 @@ package command import ( + "flag" "io/ioutil" + "log" "os" "path/filepath" "strings" @@ -9,6 +11,7 @@ import ( "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/logging" "github.com/hashicorp/terraform/terraform" ) @@ -27,6 +30,19 @@ func init() { } } +func TestMain(m *testing.M) { + flag.Parse() + if testing.Verbose() { + // if we're verbose, use the logging requested by TF_LOG + logging.SetOutput() + } else { + // otherwise silence all logs + log.SetOutput(ioutil.Discard) + } + + os.Exit(m.Run()) +} + func tempDir(t *testing.T) string { dir, err := ioutil.TempDir("", "tf") if err != nil { diff --git a/config/config_test.go b/config/config_test.go index 6d6de1422..66f712f67 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,15 +1,34 @@ package config import ( + "flag" + "io/ioutil" + "log" + "os" "path/filepath" "reflect" "strings" "testing" + + "github.com/hashicorp/terraform/helper/logging" ) // This is the directory where our test fixtures are. 
const fixtureDir = "./test-fixtures" +func TestMain(m *testing.M) { + flag.Parse() + if testing.Verbose() { + // if we're verbose, use the logging requested by TF_LOG + logging.SetOutput() + } else { + // otherwise silence all logs + log.SetOutput(ioutil.Discard) + } + + os.Exit(m.Run()) +} + func TestConfigCopy(t *testing.T) { c := testConfig(t, "copy-basic") rOrig := c.Resources[0] diff --git a/dag/dag_test.go b/dag/dag_test.go index 2d17a8c37..4f8aa3d0d 100644 --- a/dag/dag_test.go +++ b/dag/dag_test.go @@ -1,13 +1,32 @@ package dag import ( + "flag" "fmt" + "io/ioutil" + "log" + "os" "reflect" "strings" "sync" "testing" + + "github.com/hashicorp/terraform/helper/logging" ) +func TestMain(m *testing.M) { + flag.Parse() + if testing.Verbose() { + // if we're verbose, use the logging requested by TF_LOG + logging.SetOutput() + } else { + // otherwise silence all logs + log.SetOutput(ioutil.Discard) + } + + os.Exit(m.Run()) +} + func TestAcyclicGraphRoot(t *testing.T) { var g AcyclicGraph g.Add(1) diff --git a/helper/logging/logging.go b/helper/logging/logging.go index bff2c7254..969a6f151 100644 --- a/helper/logging/logging.go +++ b/helper/logging/logging.go @@ -47,6 +47,22 @@ func LogOutput() (logOutput io.Writer, err error) { return } +// SetOutput checks for a log destination with LogOutput, and calls +// log.SetOutput with the result. If LogOutput returns nil, SetOutput uses +// ioutil.Discard. Any error from LogOutout is fatal. +func SetOutput() { + out, err := LogOutput() + if err != nil { + log.Fatal(err) + } + + if out == nil { + out = ioutil.Discard + } + + log.SetOutput(out) +} + // LogLevel returns the current log level string based the environment vars func LogLevel() string { envLevel := os.Getenv(EnvLog) diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index fbcf6c61e..7f81bf05a 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -1,9 +1,11 @@ package terraform import ( + "flag" "fmt" "io" "io/ioutil" + "log" "os" "path/filepath" "strings" @@ -13,11 +15,25 @@ import ( "github.com/hashicorp/go-getter" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" + "github.com/hashicorp/terraform/helper/logging" ) // This is the directory where our test fixtures are. const fixtureDir = "./test-fixtures" +func TestMain(m *testing.M) { + flag.Parse() + if testing.Verbose() { + // if we're verbose, use the logging requested by TF_LOG + logging.SetOutput() + } else { + // otherwise silence all logs + log.SetOutput(ioutil.Discard) + } + + os.Exit(m.Run()) +} + func tempDir(t *testing.T) string { dir, err := ioutil.TempDir("", "tf") if err != nil { From 39bbbb8da6230f517f9e992cc591942d823a0da6 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 1 Aug 2016 18:10:53 -0400 Subject: [PATCH 0496/1238] Add merge interpolation function Add a `merge` interpolation function, which merges any number of maps. Duplicate keys are OK, with the last write winning. 
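A small, hypothetical configuration sketch of the behavior this commit message describes; `var.default_tags` is an invented variable name and not part of this changeset:

```
# merge() combines its map arguments left to right; on duplicate keys the
# right-most map wins, so the "Name" below overrides any "Name" in var.default_tags.
tags = "${merge(var.default_tags, map("Name", "example-instance"))}"

# merge(map("a", "x"), map("a", "y", "b", "z")) evaluates to { a = "y", b = "z" }
```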
--- config/interpolate_funcs.go | 21 +++++++ config/interpolate_funcs_test.go | 98 +++++++++++++++++++++++++++++++- 2 files changed, 118 insertions(+), 1 deletion(-) diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 099369064..d66073ec6 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -73,6 +73,7 @@ func Funcs() map[string]ast.Function { "lower": interpolationFuncLower(), "map": interpolationFuncMap(), "md5": interpolationFuncMd5(), + "merge": interpolationFuncMerge(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), "sha1": interpolationFuncSha1(), @@ -898,6 +899,26 @@ func interpolationFuncMd5() ast.Function { } } +func interpolationFuncMerge() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeMap}, + ReturnType: ast.TypeMap, + Variadic: true, + VariadicType: ast.TypeMap, + Callback: func(args []interface{}) (interface{}, error) { + outputMap := make(map[string]ast.Variable) + + for _, arg := range args { + for k, v := range arg.(map[string]ast.Variable) { + outputMap[k] = v + } + } + + return outputMap, nil + }, + } +} + // interpolationFuncUpper implements the "upper" function that does // string upper casing. func interpolationFuncUpper() ast.Function { diff --git a/config/interpolate_funcs_test.go b/config/interpolate_funcs_test.go index 0d4f38d9c..0b77af328 100644 --- a/config/interpolate_funcs_test.go +++ b/config/interpolate_funcs_test.go @@ -498,6 +498,103 @@ func TestInterpolateFuncConcat(t *testing.T) { }) } +func TestInterpolateFuncMerge(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + // basic merge + { + `${merge(map("a", "b"), map("c", "d"))}`, + map[string]interface{}{"a": "b", "c": "d"}, + false, + }, + + // merge with conflicts is ok, last in wins. 
+ { + `${merge(map("a", "b", "c", "X"), map("c", "d"))}`, + map[string]interface{}{"a": "b", "c": "d"}, + false, + }, + + // merge variadic + { + `${merge(map("a", "b"), map("c", "d"), map("e", "f"))}`, + map[string]interface{}{"a": "b", "c": "d", "e": "f"}, + false, + }, + + // merge with variables + { + `${merge(var.maps[0], map("c", "d"))}`, + map[string]interface{}{"key1": "a", "key2": "b", "c": "d"}, + false, + }, + + // only accept maps + { + `${merge(map("a", "b"), list("c", "d"))}`, + nil, + true, + }, + + // merge maps of maps + { + `${merge(map("a", var.maps[0]), map("b", var.maps[1]))}`, + map[string]interface{}{ + "b": map[string]interface{}{"key3": "d", "key4": "c"}, + "a": map[string]interface{}{"key1": "a", "key2": "b"}, + }, + false, + }, + // merge maps of lists + { + `${merge(map("a", list("b")), map("c", list("d", "e")))}`, + map[string]interface{}{"a": []interface{}{"b"}, "c": []interface{}{"d", "e"}}, + false, + }, + // merge map of various kinds + { + `${merge(map("a", var.maps[0]), map("b", list("c", "d")))}`, + map[string]interface{}{"a": map[string]interface{}{"key1": "a", "key2": "b"}, "b": []interface{}{"c", "d"}}, + false, + }, + }, + Vars: map[string]ast.Variable{ + "var.maps": { + Type: ast.TypeList, + Value: []ast.Variable{ + { + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "key1": { + Type: ast.TypeString, + Value: "a", + }, + "key2": { + Type: ast.TypeString, + Value: "b", + }, + }, + }, + { + Type: ast.TypeMap, + Value: map[string]ast.Variable{ + "key3": { + Type: ast.TypeString, + Value: "d", + }, + "key4": { + Type: ast.TypeString, + Value: "c", + }, + }, + }, + }, + }, + }, + }) + +} + func TestInterpolateFuncDistinct(t *testing.T) { testFunction(t, testFunctionConfig{ Cases: []testFunctionCase{ @@ -1542,7 +1639,6 @@ type testFunctionCase struct { func testFunction(t *testing.T, config testFunctionConfig) { for i, tc := range config.Cases { - fmt.Println("running", i) ast, err := hil.Parse(tc.Input) if err != nil { t.Fatalf("Case #%d: input: %#v\nerr: %v", i, tc.Input, err) From 0e36c130df87c3b0e31de3444566830289da328e Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 1 Aug 2016 18:27:54 -0500 Subject: [PATCH 0497/1238] docs: Add lists and map overrides to upgrade guide --- .../source/upgrade-guides/0-7.html.markdown | 69 +++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/website/source/upgrade-guides/0-7.html.markdown b/website/source/upgrade-guides/0-7.html.markdown index cf873fab1..30025a11d 100644 --- a/website/source/upgrade-guides/0-7.html.markdown +++ b/website/source/upgrade-guides/0-7.html.markdown @@ -133,3 +133,72 @@ resource "aws_instance" "example" { # ... } ``` + +## Migrating to native lists and maps + +Terraform 0.7 now supports lists and maps as first-class constructs. Although the patterns commonly used in previous versions still work (excepting any compatibility notes), there are now patterns with cleaner syntax available. + +For example, a common pattern for exporting a list of values from a module was to use an output with a `join()` interpolation, like this: + +``` +output "private_subnets" { + value = "${join(",", aws_subnet.private.*.id)}" +} +``` + +When using the value produced by this output in another module, a corresponding `split()` would be used to retrieve individual elements, often parameterized by `count.index`, for example: + +``` +subnet_id = "${element(split(",", var.private_subnets), count.index)}" +``` + +Using Terraform 0.7, list values can now be passed between modules directly. 
The above example can read like this for the output: + +``` +output "private_subnets" { + value = ["${aws_subnet.private.*.id}"] +} +``` + +And then when passed to another module as a `list` type variable, we can index directly using `[]` syntax: + +``` +subnet_id = "${var.private_subnets[count.index]}" +``` + +Note that indexing syntax does not wrap around if the extent of a list is reached - for example if you are trying to distribute 10 instances across three private subnets. For this behaviour, `element` can still be used: + +``` +subnet_id = "${element(var.private_subnets, count.index)}" +``` + +## Map value overrides + +Previously, individual elements in a map could be overriden by using a dot notation. For example, if the following variable was declared: + +``` +variable "amis" { + type = "map" + default = { + us-east-1 = "ami-123456" + us-west-2 = "ami-456789" + eu-west-1 = "ami-789123" + } +} +``` + +The key "us-west-2" could be overriden using `-var "amis.us-west-2=overriden_value"` (or equivalent in an environment variable or `tfvars` file). The syntax for this has now changed - instead maps from the command line will be merged with the default value, with maps from flags taking precedence. The syntax for overriding individual values is now: + +``` +-var 'amis = { us-west-2 = "overriden_value" }' +``` + +This will give the map the effective value: + +``` +{ + us-east-1 = "ami-123456" + us-west-2 = "overriden_value" + eu-west-1 = "ami-789123" +} +``` From f76c1ec9211a7a517d6fdbd1167814baa6ff2bbf Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 1 Aug 2016 18:54:17 -0500 Subject: [PATCH 0498/1238] provider/aws: Fix aws_route53_record 0-2 migration (#7907) When migrating the state of an `aws_route53_record`, a v0 state was never upgraded to v2, and a typo in a unit test masked this. This commit fixes the migration by chaining the invocation of the migration function, and corrects the test. 
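In case the chaining pattern described above is unfamiliar, the following is a self-contained sketch of the idea, with hypothetical names and a simplified stand-in for the instance state (the actual provider change follows in the diff): a v0 state must be pushed through each intermediate upgrade step rather than returning after the first one.

```
package main

import "fmt"

// instanceState stands in for terraform.InstanceState in this sketch.
type instanceState struct {
	Attributes map[string]string
}

// Hypothetical per-step upgrades; the real ones rewrite record attributes.
func migrateV0toV1(is *instanceState) (*instanceState, error) {
	is.Attributes["schema_version"] = "1"
	return is, nil
}

func migrateV1toV2(is *instanceState) (*instanceState, error) {
	is.Attributes["schema_version"] = "2"
	return is, nil
}

// migrateState shows the chaining pattern: a v0 state is upgraded to v1 and
// the result is immediately passed through the v1->v2 step, so it can never
// be left stranded at an intermediate version.
func migrateState(v int, is *instanceState) (*instanceState, error) {
	switch v {
	case 0:
		v1, err := migrateV0toV1(is)
		if err != nil {
			return v1, err
		}
		return migrateV1toV2(v1)
	case 1:
		return migrateV1toV2(is)
	default:
		return is, fmt.Errorf("unexpected schema version: %d", v)
	}
}

func main() {
	s := &instanceState{Attributes: map[string]string{}}
	out, _ := migrateState(0, s)
	fmt.Println(out.Attributes["schema_version"]) // prints "2", not "1"
}
```

The bug being fixed was exactly the missing chain: returning straight from the v0 case left the state at v1, and a test that declared its fixture as `StateVersion: 1` instead of `0` never exercised that path.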
--- .../providers/aws/resource_aws_route53_record_migrate.go | 8 ++++++-- .../aws/resource_aws_route53_record_migrate_test.go | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_route53_record_migrate.go b/builtin/providers/aws/resource_aws_route53_record_migrate.go index 5e81b5933..ad6cda9d3 100644 --- a/builtin/providers/aws/resource_aws_route53_record_migrate.go +++ b/builtin/providers/aws/resource_aws_route53_record_migrate.go @@ -12,8 +12,12 @@ func resourceAwsRoute53RecordMigrateState( v int, is *terraform.InstanceState, meta interface{}) (*terraform.InstanceState, error) { switch v { case 0: - log.Println("[INFO] Found AWS Route53 Record State v0; migrating to v1") - return migrateRoute53RecordStateV0toV1(is) + log.Println("[INFO] Found AWS Route53 Record State v0; migrating to v1 then v2") + v1InstanceState, err := migrateRoute53RecordStateV0toV1(is) + if err != nil { + return v1InstanceState, err + } + return migrateRoute53RecordStateV1toV2(v1InstanceState) case 1: log.Println("[INFO] Found AWS Route53 Record State v1; migrating to v2") return migrateRoute53RecordStateV1toV2(is) diff --git a/builtin/providers/aws/resource_aws_route53_record_migrate_test.go b/builtin/providers/aws/resource_aws_route53_record_migrate_test.go index 672b51761..6efe0a4fa 100644 --- a/builtin/providers/aws/resource_aws_route53_record_migrate_test.go +++ b/builtin/providers/aws/resource_aws_route53_record_migrate_test.go @@ -79,7 +79,7 @@ func TestAWSRoute53RecordMigrateStateV1toV2(t *testing.T) { }, }, "v0_2": { - StateVersion: 1, + StateVersion: 0, Attributes: map[string]string{ "weight": "-1", }, From c42121b6c0dc4a8aecdd3b33e61c5cb8ad08eae5 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 2 Aug 2016 09:57:22 +1000 Subject: [PATCH 0499/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2f4319aee..7f95bf9fd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -253,6 +253,7 @@ BUG FIXES: * provider/aws: Restore Defaults to SQS Queues [GH-7818] * provider/aws: Don't delete Lambda function from state on initial call of the Read func [GH-7829] * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state [GH-7861] + * provider/aws: Fix aws_route53_record 0-2 migration [GH-7907] * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] From 068059ab3fe73909e1314e9618fc7f63c3552ca2 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 2 Aug 2016 09:33:08 -0400 Subject: [PATCH 0500/1238] Add `merge` doc --- website/source/docs/configuration/interpolation.html.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 2d3e42672..91de2341b 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -193,6 +193,11 @@ The supported built-in functions are: * `map("hello", "world")` * `map("us-east", list("a", "b", "c"), "us-west", list("b", "c", "d"))` + * `merge(map1, map2, ...)` - Returns the union of 2 or more maps. 
The maps + are consumed in the order provided, and duplciate keys overwrite previous + entries. + * `${merge(map("a", "b"), map("c", "d"))}` returns `{"a": "b", "c": "d"}` + * `md5(string)` - Returns a (conventional) hexadecimal representation of the MD5 hash of the given string. From 2a11f3a138fd6cef718ebd7026b69456ed5423ef Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 2 Aug 2016 09:34:29 -0400 Subject: [PATCH 0501/1238] make variadic syntax consistent in docs --- website/source/docs/configuration/interpolation.html.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 91de2341b..f7cd5108e 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -110,7 +110,7 @@ The supported built-in functions are: variables or when parsing module outputs. Example: `compact(module.my_asg.load_balancer_names)` - * `concat(list1, list2)` - Combines two or more lists into a single list. + * `concat(list1, list2, ...)` - Combines two or more lists into a single list. Example: `concat(aws_instance.db.*.tags.Name, aws_instance.web.*.tags.Name)` * `distinct(list)` - Removes duplicate items from a list. Keeps the first @@ -132,13 +132,13 @@ The supported built-in functions are: module, you generally want to make the path relative to the module base, like this: `file("${path.module}/file")`. - * `format(format, args...)` - Formats a string according to the given + * `format(format, args, ...)` - Formats a string according to the given format. The syntax for the format is standard `sprintf` syntax. Good documentation for the syntax can be [found here](https://golang.org/pkg/fmt/). Example to zero-prefix a count, used commonly for naming servers: `format("web-%03d", count.index + 1)`. - * `formatlist(format, args...)` - Formats each element of a list + * `formatlist(format, args, ...)` - Formats each element of a list according to the given format, similarly to `format`, and returns a list. Non-list arguments are repeated for each list element. For example, to convert a list of DNS addresses to a list of URLs, you might use: @@ -171,7 +171,7 @@ The supported built-in functions are: * `${length("a,b,c")}` = 5 * `${length(map("key", "val"))}` = 1 - * `list(items...)` - Returns a list consisting of the arguments to the function. + * `list(items, ...)` - Returns a list consisting of the arguments to the function. This function provides a way of representing list literals in interpolation. * `${list("a", "b", "c")}` returns a list of `"a", "b", "c"`. * `${list()}` returns an empty list. From e1532738e6228c8f1729345559624a2777ee8621 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 2 Aug 2016 11:19:47 -0400 Subject: [PATCH 0502/1238] Minor fixups Some spelling and working fixes. Added mention of deprecation warning to exisiting data sources as resources. Mention `~/.terraform.d` for old builtin plugin location Added mention of `terraform.tfvars` and `-var-file` in map value overrides. 
--- .../source/upgrade-guides/0-7.html.markdown | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/website/source/upgrade-guides/0-7.html.markdown b/website/source/upgrade-guides/0-7.html.markdown index 30025a11d..6ba96193d 100644 --- a/website/source/upgrade-guides/0-7.html.markdown +++ b/website/source/upgrade-guides/0-7.html.markdown @@ -22,9 +22,9 @@ terraform-provider-* # provider plugins terraform-provisioner-* # provisioner plugins ``` -These binaries needed to all be extracted to somewhere on your `$PATH` for Terraform to work. +These binaries needed to all be extracted to somewhere in your `$PATH` or in the `~/.terraform.d` directory for Terraform to work. -As of v0.7, these plugins all ship embedded in a single binary. This means that if you just extract the v0.7 archive into a path, you may still have the old separate binaries in your `$PATH`. You'll need to remove them manually. +As of v0.7, all built-in plugins ship embedded in a single binary. This means that if you just extract the v0.7 archive into a path, you may still have the old separate binaries in your `$PATH`. You'll need to remove them manually. For example, if you keep Terraform binaries in `/usr/local/bin` you can clear out the old external binaries like this: @@ -32,7 +32,7 @@ For example, if you keep Terraform binaries in `/usr/local/bin` you can clear ou rm /usr/local/bin/terraform-* ``` -External plugin binaries continue to work using the same pattern, but due to updates to the RPC protocol, they will need to be recompiled to be compatible with Terraform v0.7. +External plugin binaries continue to work using the same pattern, but due to updates to the RPC protocol, they will need to be recompiled to be compatible with Terraform v0.7.x. ## Maps in Displayed Plans @@ -59,10 +59,10 @@ The `concat()` interpolation function used to work for both lists and strings. I "${concat(var.foo, "-suffix")}" # => Error! No longer supported. ``` -Instead, you can use nested interpolation braces for string concatenation. +Instead, you can use variable interpolation for string concatenation. ``` -"${"${var.foo}-suffix"}" +"${var.foo}-suffix" ``` ### Nested Quotes and Escaping @@ -101,7 +101,7 @@ For most users, this change will not affect your day-to-day usage of Terraform. ## Migrating to Data Sources -With the addition of [Data Sources](/docs/configuration/data-sources.html), there are several resources that were acting as Data Sources that are now deprecated. +With the addition of [Data Sources](/docs/configuration/data-sources.html), there are several resources that were acting as Data Sources that are now deprecated. Existing configurations will continue to work, but will print a deprecation warning when a data source is used as a resource. * `atlas_artifact` * `template_file` @@ -174,7 +174,7 @@ subnet_id = "${element(var.private_subnets, count.index)}" ## Map value overrides -Previously, individual elements in a map could be overriden by using a dot notation. For example, if the following variable was declared: +Previously, individual elements in a map could be overridden by using a dot notation. For example, if the following variable was declared: ``` variable "amis" { @@ -187,7 +187,7 @@ variable "amis" { } ``` -The key "us-west-2" could be overriden using `-var "amis.us-west-2=overriden_value"` (or equivalent in an environment variable or `tfvars` file). 
The syntax for this has now changed - instead maps from the command line will be merged with the default value, with maps from flags taking precedence. The syntax for overriding individual values is now: +The key "us-west-2" could be overridden using `-var "amis.us-west-2=overriden_value"` (or equivalent in an environment variable or `tfvars` file). The syntax for this has now changed - instead maps from the command line will be merged with the default value, with maps from flags taking precedence. The syntax for overriding individual values is now: ``` -var 'amis = { us-west-2 = "overriden_value" }' @@ -202,3 +202,5 @@ This will give the map the effective value: eu-west-1 = "ami-789123" } ``` + +It's also possible to override the values in a variables file, either in `terraform.tfvars` or specified using the `-var-file` flag. From e822a79165dbc06bbf8271ee349fe256867d53dc Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 2 Aug 2016 17:59:44 +0000 Subject: [PATCH 0503/1238] v0.7.0 --- CHANGELOG.md | 518 +++++++++++++++++++++---------------------- terraform/version.go | 2 +- website/config.rb | 2 +- 3 files changed, 261 insertions(+), 261 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7f95bf9fd..8a811a785 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,4 @@ -## 0.7 (Unreleased) +## 0.7.0 (August 2, 2016) BACKWARDS INCOMPATIBILITIES / NOTES: @@ -6,15 +6,15 @@ BACKWARDS INCOMPATIBILITIES / NOTES: * Terraform's built-in plugins are now distributed as part of the main Terraform binary, and use the go-plugin framework. Overrides are still available using separate binaries, but will need recompiling against Terraform 0.7. * The `terraform plan` command no longer persists state. This makes the command much safer to run, since it is now side-effect free. The `refresh` and `apply` commands still persist state to local and remote storage. Any automation that assumes that `terraform plan` persists state will need to be reworked to explicitly call `terraform refresh` to get the equivalent side-effect. (The `terraform plan` command no longer has the `-state-out` or `-backup` flags due to this change.) * The `concat()` interpolation function can no longer be used to join strings. - * Quotation marks may no longer be escaped in HIL expressions [GH-7201] + * Quotation marks may no longer be escaped in HIL expressions ([#7201](https://github.com/hashicorp/terraform/issues/7201)) * Lists materialized using splat syntax, for example `aws_instance.foo.*.id` are now ordered by the count index rather than lexographically sorted. If this produces a large number of undesirable differences, you can use the new `sort()` interpolation function to produce the previous behaviour. * You now access the values of maps using the syntax `var.map["key"]` or the `lookup` function instead of `var.map.key`. * Outputs on `terraform_remote_state` resources are now top level attributes rather than inside the `output` map. In order to access outputs, use the syntax: `terraform_remote_state.name.outputname`. Currently outputs cannot be named `config` or `backend`. * AWS Provider * `aws_elb` now defaults `cross_zone_load_balancing` to `true` * `aws_instance`: EC2 Classic users may continue to use `security_groups` to reference Security Groups by their `name`. Users who are managing Instances inside VPCs will need to use `vpc_security_group_ids` instead, and reference the security groups by their `id`. 
Ref https://github.com/hashicorp/terraform/issues/6416#issuecomment-219145065 - * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Checkout the documentaiton on [AWS Kinesis Firehose](http://localhost:4567/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information [GH-7375] - * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . [GH-6954] + * `aws_kinesis_firehose_delivery_stream`: AWS Kinesis Firehose has been refactored to support Redshift as a destination in addition to S3. As a result, the configuration has changed and users will need to update their configuration to match the new `s3_configuration` block. Checkout the documentaiton on [AWS Kinesis Firehose](http://localhost:4567/docs/providers/aws/r/kinesis_firehose_delivery_stream.html) for more information ([#7375](https://github.com/hashicorp/terraform/issues/7375)) + * `aws_route53_record`: `latency_routing_policy`, `geolocation_routing_policy`, and `failover_routing_policy` block options have been added. With these additions we’ve renamed the `weight` attribute to `weighted_routing_policy`, and it has changed from a string to a block to match the others. Please see the updated documentation on using `weighted_routing_policy`: https://www.terraform.io/docs/providers/aws/r/route53_record.html . ([#6954](https://github.com/hashicorp/terraform/issues/6954)) * `aws_db_instance` now defaults `publicly_accessible` to false * Microsoft Azure Provider * In documentation, the "Azure (Resource Manager)" provider has been renamed to the "Microsoft Azure" provider. @@ -33,269 +33,269 @@ BACKWARDS INCOMPATIBILITIES / NOTES: FEATURES: - * **Data sources** are a new kind of primitive in Terraform. Attributes for data sources are refreshed and available during the planning stage. [GH-6598] - * **Lists and maps** can now be used as first class types for variables and may also be passed between modules. [GH-6322] - * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. [GH-5811] + * **Data sources** are a new kind of primitive in Terraform. Attributes for data sources are refreshed and available during the planning stage. ([#6598](https://github.com/hashicorp/terraform/issues/6598)) + * **Lists and maps** can now be used as first class types for variables and may also be passed between modules. ([#6322](https://github.com/hashicorp/terraform/issues/6322)) + * **State management CLI commands** provide a variety of state manipulation functions for advanced use cases. This should be used where possible instead of manually modifying state files. ([#5811](https://github.com/hashicorp/terraform/issues/5811)) * **State Import** allows a way to import existing resources into Terraform state for many types of resource. 
Initial coverage of AWS is quite high, and it is straightforward to add support for new resources. - * **New Command:** `terraform state` to provide access to a variety of state manipulation functions [GH-5811] - * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs [GH-7608] - * **New Data Source:** `aws_ami` [GH-6911] - * **New Data Source:** `aws_availability_zones` [GH-6805] - * **New Data Source:** `aws_iam_policy_document` [GH-6881] - * **New Data Source:** `aws_s3_bucket_object` [GH-6946] - * **New Data Source:** `aws_ecs_container_definition` [GH-7230] - * **New Data Source:** `atlas_artifact` [GH-7419] - * **New Data Source:** `docker_registry_image` [GH-7000] - * **New Data Source:** `consul_keys` [GH-7678] - * **New Interpolation Function:** `sort` [GH-7128] - * **New Interpolation Function:** `distinct` [GH-7174] - * **New Interpolation Function:** `list` [GH-7528] - * **New Interpolation Function:** `map` [GH-7832] - * **New Provider:** `grafana` [GH-6206] - * **New Provider:** `logentries` [GH-7067] - * **New Provider:** `scaleway` [GH-7331] - * **New Provider:** `random` - allows generation of random values without constantly generating diffs [GH-6672] - * **New Remote State Provider:** - `gcs` - Google Cloud Storage [GH-6814] - * **New Remote State Provider:** - `azure` - Microsoft Azure Storage [GH-7064] - * **New Resource:** `aws_elb_attachment` [GH-6879] - * **New Resource:** `aws_elastictranscoder_preset` [GH-6965] - * **New Resource:** `aws_elastictranscoder_pipeline` [GH-6965] - * **New Resource:** `aws_iam_group_policy_attachment` [GH-6858] - * **New Resource:** `aws_iam_role_policy_attachment` [GH-6858] - * **New Resource:** `aws_iam_user_policy_attachment` [GH-6858] - * **New Resource:** `aws_rds_cluster_parameter_group` [GH-5269] - * **New Resource:** `aws_spot_fleet_request` [GH-7243] - * **New Resource:** `aws_ses_active_receipt_rule_set` [GH-5387] - * **New Resource:** `aws_ses_receipt_filter` [GH-5387] - * **New Resource:** `aws_ses_receipt_rule` [GH-5387] - * **New Resource:** `aws_ses_receipt_rule_set` [GH-5387] - * **New Resource:** `aws_simpledb_domain` [GH-7600] - * **New Resource:** `aws_opsworks_user_profile` [GH-6304] - * **New Resource:** `aws_opsworks_permission` [GH-6304] - * **New Resource:** `aws_ami_launch_permission` [GH-7365] - * **New Resource:** `aws_appautoscaling_policy` [GH-7663] - * **New Resource:** `aws_appautoscaling_target` [GH-7663] - * **New Resource:** `openstack_blockstorage_volume_v2` [GH-6693] - * **New Resource:** `openstack_lb_loadbalancer_v2` [GH-7012] - * **New Resource:** `openstack_lb_listener_v2` [GH-7012] - * **New Resource:** `openstack_lb_pool_v2` [GH-7012] - * **New Resource:** `openstack_lb_member_v2` [GH-7012] - * **New Resource:** `openstack_lb_monitor_v2` [GH-7012] - * **New Resource:** `vsphere_virtual_disk` [GH-6273] - * **New Resource:** `github_repository_collaborator` [GH-6861] - * **New Resource:** `datadog_timeboard` [GH-6900] - * **New Resource:** `digitalocean_tag` [GH-7500] - * **New Resource:** `digitalocean_volume` [GH-7560] - * **New Resource:** `consul_agent_service` [GH-7508] - * **New Resource:** `consul_catalog_entry` [GH-7508] - * **New Resource:** `consul_node` [GH-7508] - * **New Resource:** `consul_service` [GH-7508] - * **New Resource:** `mysql_grant` [GH-7656] - * **New Resource:** `mysql_user` [GH-7656] - * **New Resource:** `azurerm_storage_table` [GH-7327] - * **New Resource:** `azurerm_virtual_machine_scale_set` [GH-6711] 
- * **New Resource:** `azurerm_traffic_manager_endpoint` [GH-7826] - * **New Resource:** `azurerm_traffic_manager_profile` [GH-7826] - * core: Tainted resources now show up in the plan and respect dependency ordering [GH-6600] - * core: The `lookup` interpolation function can now have a default fall-back value specified [GH-6884] - * core: The `terraform plan` command no longer persists state. [GH-6811] + * **New Command:** `terraform state` to provide access to a variety of state manipulation functions ([#5811](https://github.com/hashicorp/terraform/issues/5811)) + * **New Option:** `terraform output` now supports the `-json` flag to print a machine-readable representation of outputs ([#7608](https://github.com/hashicorp/terraform/issues/7608)) + * **New Data Source:** `aws_ami` ([#6911](https://github.com/hashicorp/terraform/issues/6911)) + * **New Data Source:** `aws_availability_zones` ([#6805](https://github.com/hashicorp/terraform/issues/6805)) + * **New Data Source:** `aws_iam_policy_document` ([#6881](https://github.com/hashicorp/terraform/issues/6881)) + * **New Data Source:** `aws_s3_bucket_object` ([#6946](https://github.com/hashicorp/terraform/issues/6946)) + * **New Data Source:** `aws_ecs_container_definition` ([#7230](https://github.com/hashicorp/terraform/issues/7230)) + * **New Data Source:** `atlas_artifact` ([#7419](https://github.com/hashicorp/terraform/issues/7419)) + * **New Data Source:** `docker_registry_image` ([#7000](https://github.com/hashicorp/terraform/issues/7000)) + * **New Data Source:** `consul_keys` ([#7678](https://github.com/hashicorp/terraform/issues/7678)) + * **New Interpolation Function:** `sort` ([#7128](https://github.com/hashicorp/terraform/issues/7128)) + * **New Interpolation Function:** `distinct` ([#7174](https://github.com/hashicorp/terraform/issues/7174)) + * **New Interpolation Function:** `list` ([#7528](https://github.com/hashicorp/terraform/issues/7528)) + * **New Interpolation Function:** `map` ([#7832](https://github.com/hashicorp/terraform/issues/7832)) + * **New Provider:** `grafana` ([#6206](https://github.com/hashicorp/terraform/issues/6206)) + * **New Provider:** `logentries` ([#7067](https://github.com/hashicorp/terraform/issues/7067)) + * **New Provider:** `scaleway` ([#7331](https://github.com/hashicorp/terraform/issues/7331)) + * **New Provider:** `random` - allows generation of random values without constantly generating diffs ([#6672](https://github.com/hashicorp/terraform/issues/6672)) + * **New Remote State Provider:** - `gcs` - Google Cloud Storage ([#6814](https://github.com/hashicorp/terraform/issues/6814)) + * **New Remote State Provider:** - `azure` - Microsoft Azure Storage ([#7064](https://github.com/hashicorp/terraform/issues/7064)) + * **New Resource:** `aws_elb_attachment` ([#6879](https://github.com/hashicorp/terraform/issues/6879)) + * **New Resource:** `aws_elastictranscoder_preset` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) + * **New Resource:** `aws_elastictranscoder_pipeline` ([#6965](https://github.com/hashicorp/terraform/issues/6965)) + * **New Resource:** `aws_iam_group_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) + * **New Resource:** `aws_iam_role_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) + * **New Resource:** `aws_iam_user_policy_attachment` ([#6858](https://github.com/hashicorp/terraform/issues/6858)) + * **New Resource:** `aws_rds_cluster_parameter_group` 
([#5269](https://github.com/hashicorp/terraform/issues/5269)) + * **New Resource:** `aws_spot_fleet_request` ([#7243](https://github.com/hashicorp/terraform/issues/7243)) + * **New Resource:** `aws_ses_active_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) + * **New Resource:** `aws_ses_receipt_filter` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) + * **New Resource:** `aws_ses_receipt_rule` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) + * **New Resource:** `aws_ses_receipt_rule_set` ([#5387](https://github.com/hashicorp/terraform/issues/5387)) + * **New Resource:** `aws_simpledb_domain` ([#7600](https://github.com/hashicorp/terraform/issues/7600)) + * **New Resource:** `aws_opsworks_user_profile` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) + * **New Resource:** `aws_opsworks_permission` ([#6304](https://github.com/hashicorp/terraform/issues/6304)) + * **New Resource:** `aws_ami_launch_permission` ([#7365](https://github.com/hashicorp/terraform/issues/7365)) + * **New Resource:** `aws_appautoscaling_policy` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) + * **New Resource:** `aws_appautoscaling_target` ([#7663](https://github.com/hashicorp/terraform/issues/7663)) + * **New Resource:** `openstack_blockstorage_volume_v2` ([#6693](https://github.com/hashicorp/terraform/issues/6693)) + * **New Resource:** `openstack_lb_loadbalancer_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) + * **New Resource:** `openstack_lb_listener_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) + * **New Resource:** `openstack_lb_pool_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) + * **New Resource:** `openstack_lb_member_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) + * **New Resource:** `openstack_lb_monitor_v2` ([#7012](https://github.com/hashicorp/terraform/issues/7012)) + * **New Resource:** `vsphere_virtual_disk` ([#6273](https://github.com/hashicorp/terraform/issues/6273)) + * **New Resource:** `github_repository_collaborator` ([#6861](https://github.com/hashicorp/terraform/issues/6861)) + * **New Resource:** `datadog_timeboard` ([#6900](https://github.com/hashicorp/terraform/issues/6900)) + * **New Resource:** `digitalocean_tag` ([#7500](https://github.com/hashicorp/terraform/issues/7500)) + * **New Resource:** `digitalocean_volume` ([#7560](https://github.com/hashicorp/terraform/issues/7560)) + * **New Resource:** `consul_agent_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) + * **New Resource:** `consul_catalog_entry` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) + * **New Resource:** `consul_node` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) + * **New Resource:** `consul_service` ([#7508](https://github.com/hashicorp/terraform/issues/7508)) + * **New Resource:** `mysql_grant` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) + * **New Resource:** `mysql_user` ([#7656](https://github.com/hashicorp/terraform/issues/7656)) + * **New Resource:** `azurerm_storage_table` ([#7327](https://github.com/hashicorp/terraform/issues/7327)) + * **New Resource:** `azurerm_virtual_machine_scale_set` ([#6711](https://github.com/hashicorp/terraform/issues/6711)) + * **New Resource:** `azurerm_traffic_manager_endpoint` ([#7826](https://github.com/hashicorp/terraform/issues/7826)) + * **New Resource:** `azurerm_traffic_manager_profile` 
([#7826](https://github.com/hashicorp/terraform/issues/7826)) + * core: Tainted resources now show up in the plan and respect dependency ordering ([#6600](https://github.com/hashicorp/terraform/issues/6600)) + * core: The `lookup` interpolation function can now have a default fall-back value specified ([#6884](https://github.com/hashicorp/terraform/issues/6884)) + * core: The `terraform plan` command no longer persists state. ([#6811](https://github.com/hashicorp/terraform/issues/6811)) IMPROVEMENTS: - * core: The `jsonencode` interpolation function now supports encoding lists and maps [GH-6749] - * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. [GH-6923] - * core: Support `.` in map keys [GH-7654] - * core: Enhance interpolation functions to account for first class maps and lists [GH-7832] [GH-7834] - * command: Remove second DefaultDataDirectory const [GH-7666] - * provider/aws: Add `dns_name` to `aws_efs_mount_target` [GH-7428] - * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user [GH-7766] - * provider/aws: Add `option_settings` to `aws_db_option_group` [GH-6560] - * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster [GH-6795] - * provider/aws: Add support for S3 Bucket Acceleration [GH-6628] - * provider/aws: Add support for `kms_key_id` to `aws_db_instance` [GH-6651] - * provider/aws: Specifying more than one health check on an `aws_elb` fails with an error prior to making an API request [GH-7489] - * provider/aws: Add support to `aws_redshift_cluster` for `iam_roles` [GH-6647] - * provider/aws: SQS use raw policy string if compact fails [GH-6724] - * provider/aws: Set default description to "Managed by Terraform" [GH-6104] - * provider/aws: Support for Redshift Cluster encryption using a KMS key [GH-6712] - * provider/aws: Support tags for AWS redshift cluster [GH-5356] - * provider/aws: Add `iam_arn` to aws_cloudfront_origin_access_identity [GH-6955] - * provider/aws: Add `cross_zone_load_balancing` on `aws_elb` default to true [GH-6897] - * provider/aws: Add support for `character_set_name` to `aws_db_instance` [GH-4861] - * provider/aws: Add support for DB parameter group with RDS Cluster Instances (Aurora) [GH-6865] - * provider/aws: Add `name_prefix` to `aws_iam_instance_profile` and `aws_iam_role` [GH-6939] - * provider/aws: Allow authentication & credentials validation for federated IAM Roles and EC2 instance profiles [GH-6536] - * provider/aws: Rename parameter_group_name to db_cluster_parameter_group_name [GH-7083] - * provider/aws: Retry RouteTable Route/Assocation creation [GH-7156] - * provider/aws: `delegation_set_id` conflicts w/ `vpc_id` in `aws_route53_zone` as delegation sets can only be used for public zones [GH-7213] - * provider/aws: Support Elastic Beanstalk scheduledaction [GH-7376] - * provider/aws: Add support for NewInstancesProtectedFromScaleIn to `aws_autoscaling_group` [GH-6490] - * provider/aws: Added support for `snapshot_identifier` parameter in aws_rds_cluster [GH-7158] - * provider/aws: Add inplace edit/update DB Security Group Rule Ingress [GH-7245] - * provider/aws: Added support for redshift destination to firehose delivery streams [GH-7375] - * provider/aws: Allow `aws_redshift_security_group` ingress rules to change [GH-5939] - * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` [GH-7181] - * provider/aws: AWS prefix lists to enable security group egress to a VPC 
Endpoint [GH-7511] - * provider/aws: Retry creation of IAM role depending on new IAM user [GH-7324] - * provider/aws: Allow `port` on `aws_db_instance` to be updated [GH-7441] - * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs [GH-7470] - * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition [GH-7653] - * provider/aws: Support Tags on `aws_rds_cluster` [GH-7695] - * provider/aws: Support kms_key_id for `aws_rds_cluster` [GH-7662] - * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` [GH-7523] - * provider/aws: Add support for Kinesis streams shard-level metrics [GH-7684] - * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` [GH-7364] - * provider/aws: expose network interface id in `aws_instance` [GH-6751] - * provider/aws: Adding passthrough behavior for API Gateway integration [GH-7801] - * provider/aws: Enable Redshift Cluster Logging [GH-7813] - * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` [GH-7791] - * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` [GH-6807] - * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys [GH-6742] - * provider/azurerm: The Azure SDK now exposes better error messages [GH-6976] - * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` [GH-7434] - * provider/azurerm: dump entire Request/Response in autorest Decorator [GH-7719] - * provider/azurerm: add option to delete VMs Data disks on termination [GH-7793] + * core: The `jsonencode` interpolation function now supports encoding lists and maps ([#6749](https://github.com/hashicorp/terraform/issues/6749)) + * core: Add the ability for resource definitions to mark attributes as "sensitive" which will omit them from UI output. 
([#6923](https://github.com/hashicorp/terraform/issues/6923)) + * core: Support `.` in map keys ([#7654](https://github.com/hashicorp/terraform/issues/7654)) + * core: Enhance interpolation functions to account for first class maps and lists ([#7832](https://github.com/hashicorp/terraform/issues/7832)) ([#7834](https://github.com/hashicorp/terraform/issues/7834)) + * command: Remove second DefaultDataDirectory const ([#7666](https://github.com/hashicorp/terraform/issues/7666)) + * provider/aws: Add `dns_name` to `aws_efs_mount_target` ([#7428](https://github.com/hashicorp/terraform/issues/7428)) + * provider/aws: Add `force_destroy` to `aws_iam_user` for force-deleting access keys assigned to the user ([#7766](https://github.com/hashicorp/terraform/issues/7766)) + * provider/aws: Add `option_settings` to `aws_db_option_group` ([#6560](https://github.com/hashicorp/terraform/issues/6560)) + * provider/aws: Add more explicit support for Skipping Final Snapshot in RDS Cluster ([#6795](https://github.com/hashicorp/terraform/issues/6795)) + * provider/aws: Add support for S3 Bucket Acceleration ([#6628](https://github.com/hashicorp/terraform/issues/6628)) + * provider/aws: Add support for `kms_key_id` to `aws_db_instance` ([#6651](https://github.com/hashicorp/terraform/issues/6651)) + * provider/aws: Specifying more than one health check on an `aws_elb` fails with an error prior to making an API request ([#7489](https://github.com/hashicorp/terraform/issues/7489)) + * provider/aws: Add support to `aws_redshift_cluster` for `iam_roles` ([#6647](https://github.com/hashicorp/terraform/issues/6647)) + * provider/aws: SQS use raw policy string if compact fails ([#6724](https://github.com/hashicorp/terraform/issues/6724)) + * provider/aws: Set default description to "Managed by Terraform" ([#6104](https://github.com/hashicorp/terraform/issues/6104)) + * provider/aws: Support for Redshift Cluster encryption using a KMS key ([#6712](https://github.com/hashicorp/terraform/issues/6712)) + * provider/aws: Support tags for AWS redshift cluster ([#5356](https://github.com/hashicorp/terraform/issues/5356)) + * provider/aws: Add `iam_arn` to aws_cloudfront_origin_access_identity ([#6955](https://github.com/hashicorp/terraform/issues/6955)) + * provider/aws: Add `cross_zone_load_balancing` on `aws_elb` default to true ([#6897](https://github.com/hashicorp/terraform/issues/6897)) + * provider/aws: Add support for `character_set_name` to `aws_db_instance` ([#4861](https://github.com/hashicorp/terraform/issues/4861)) + * provider/aws: Add support for DB parameter group with RDS Cluster Instances (Aurora) ([#6865](https://github.com/hashicorp/terraform/issues/6865)) + * provider/aws: Add `name_prefix` to `aws_iam_instance_profile` and `aws_iam_role` ([#6939](https://github.com/hashicorp/terraform/issues/6939)) + * provider/aws: Allow authentication & credentials validation for federated IAM Roles and EC2 instance profiles ([#6536](https://github.com/hashicorp/terraform/issues/6536)) + * provider/aws: Rename parameter_group_name to db_cluster_parameter_group_name ([#7083](https://github.com/hashicorp/terraform/issues/7083)) + * provider/aws: Retry RouteTable Route/Assocation creation ([#7156](https://github.com/hashicorp/terraform/issues/7156)) + * provider/aws: `delegation_set_id` conflicts w/ `vpc_id` in `aws_route53_zone` as delegation sets can only be used for public zones ([#7213](https://github.com/hashicorp/terraform/issues/7213)) + * provider/aws: Support Elastic Beanstalk scheduledaction 
([#7376](https://github.com/hashicorp/terraform/issues/7376)) + * provider/aws: Add support for NewInstancesProtectedFromScaleIn to `aws_autoscaling_group` ([#6490](https://github.com/hashicorp/terraform/issues/6490)) + * provider/aws: Added support for `snapshot_identifier` parameter in aws_rds_cluster ([#7158](https://github.com/hashicorp/terraform/issues/7158)) + * provider/aws: Add inplace edit/update DB Security Group Rule Ingress ([#7245](https://github.com/hashicorp/terraform/issues/7245)) + * provider/aws: Added support for redshift destination to firehose delivery streams ([#7375](https://github.com/hashicorp/terraform/issues/7375)) + * provider/aws: Allow `aws_redshift_security_group` ingress rules to change ([#5939](https://github.com/hashicorp/terraform/issues/5939)) + * provider/aws: Add support for `encryption` and `kms_key_id` to `aws_ami` ([#7181](https://github.com/hashicorp/terraform/issues/7181)) + * provider/aws: AWS prefix lists to enable security group egress to a VPC Endpoint ([#7511](https://github.com/hashicorp/terraform/issues/7511)) + * provider/aws: Retry creation of IAM role depending on new IAM user ([#7324](https://github.com/hashicorp/terraform/issues/7324)) + * provider/aws: Allow `port` on `aws_db_instance` to be updated ([#7441](https://github.com/hashicorp/terraform/issues/7441)) + * provider/aws: Allow VPC Classic Linking in Autoscaling Launch Configs ([#7470](https://github.com/hashicorp/terraform/issues/7470)) + * provider/aws: Support `task_role_arn` on `aws_ecs_task_definition ([#7653](https://github.com/hashicorp/terraform/issues/7653)) + * provider/aws: Support Tags on `aws_rds_cluster` ([#7695](https://github.com/hashicorp/terraform/issues/7695)) + * provider/aws: Support kms_key_id for `aws_rds_cluster` ([#7662](https://github.com/hashicorp/terraform/issues/7662)) + * provider/aws: Allow setting a `poll_interval` on `aws_elastic_beanstalk_environment` ([#7523](https://github.com/hashicorp/terraform/issues/7523)) + * provider/aws: Add support for Kinesis streams shard-level metrics ([#7684](https://github.com/hashicorp/terraform/issues/7684)) + * provider/aws: Support create / update greater than twenty db parameters in `aws_db_parameter_group` ([#7364](https://github.com/hashicorp/terraform/issues/7364)) + * provider/aws: expose network interface id in `aws_instance` ([#6751](https://github.com/hashicorp/terraform/issues/6751)) + * provider/aws: Adding passthrough behavior for API Gateway integration ([#7801](https://github.com/hashicorp/terraform/issues/7801)) + * provider/aws: Enable Redshift Cluster Logging ([#7813](https://github.com/hashicorp/terraform/issues/7813)) + * provider/aws: Add ability to set Performance Mode in `aws_efs_file_system` ([#7791](https://github.com/hashicorp/terraform/issues/7791)) + * provider/azurerm: Add support for EnableIPForwarding to `azurerm_network_interface` ([#6807](https://github.com/hashicorp/terraform/issues/6807)) + * provider/azurerm: Add support for exporting the `azurerm_storage_account` access keys ([#6742](https://github.com/hashicorp/terraform/issues/6742)) + * provider/azurerm: The Azure SDK now exposes better error messages ([#6976](https://github.com/hashicorp/terraform/issues/6976)) + * provider/azurerm: `azurerm_dns_zone` now returns `name_servers` ([#7434](https://github.com/hashicorp/terraform/issues/7434)) + * provider/azurerm: dump entire Request/Response in autorest Decorator ([#7719](https://github.com/hashicorp/terraform/issues/7719)) + * provider/azurerm: add option to delete VMs Data 
disks on termination ([#7793](https://github.com/hashicorp/terraform/issues/7793)) * provider/clc: Add support for hyperscale and bareMetal server types and package installation - * provider/clc: Fix optional server password [GH-6414] - * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` [GH-6898] - * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier [GH-6741] - * provider/cloudstack: Improve ACL swapping [GH-7315] - * provider/cloudstack: Add project support to `cloudstack_network_acl` and `cloudstack_network_acl_rule` [GH-7612] - * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` [GH-7070] - * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` [GH-7074] - * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` [GH-7240] - * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` [GH-7242] - * provider/datadog: Add support for 'require full window' and 'locked' [GH-6738] - * provider/docker: Docker Container DNS Setting Enhancements [GH-7392] - * provider/docker: Add `destroy_grace_seconds` option to stop container before delete [GH-7513] - * provider/docker: Add `pull_trigger` option to `docker_image` to trigger pulling layers of a given image [GH-7000] - * provider/fastly: Add support for Cache Settings [GH-6781] - * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources [GH-6622] - * provider/fastly: Add support for custom VCL configuration [GH-6662] - * provider/google: Support optional uuid naming for Instance Template [GH-6604] - * provider/openstack: Add support for client certificate authentication [GH-6279] - * provider/openstack: Allow Neutron-based Floating IP to target a specific tenant [GH-6454] - * provider/openstack: Enable DHCP By Default [GH-6838] - * provider/openstack: Implement fixed_ip on Neutron floating ip allocations [GH-6837] - * provider/openstack: Increase timeouts for image resize, subnets, and routers [GH-6764] - * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource [GH-6919] - * provider/openstack: Enforce `ForceNew` on Instance Block Device [GH-6921] - * provider/openstack: Can now stop instances before destroying them [GH-7184] - * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion [GH-6997] - * provider/powerdns: Add support for PowerDNS 4 API [GH-7819] - * provider/triton: add `triton_machine` `domain names` [GH-7149] - * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` [GH-6785] - * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip [GH-6377] - * provider/vsphere: Virtual machine update disk [GH-6619] - * provider/vsphere: `vsphere_virtual_machine` adding controller creation logic [GH-6853] - * provider/vsphere: `vsphere_virtual_machine` added support for `mac address` on `network_interface` [GH-6966] - * provider/vsphere: Enhanced `vsphere` logging capabilities [GH-6893] - * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` [GH-7088] - * provider/vsphere: Virtual Machine and File resources handle Read errors properley [GH-7220] - * provider/vsphere: set uuid as `vsphere_virtual_machine` output [GH-4382] - * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` [GH-7169] - * provider/vsphere: Add support for additional `vsphere_virtial_machine` SCSI controller types 
[GH-7525] - * provisioner/file: File provisioners may now have file content set as an attribute [GH-7561] + * provider/clc: Fix optional server password ([#6414](https://github.com/hashicorp/terraform/issues/6414)) + * provider/cloudstack: Add support for affinity groups to `cloudstack_instance` ([#6898](https://github.com/hashicorp/terraform/issues/6898)) + * provider/cloudstack: Enable swapping of ACLs without having to rebuild the network tier ([#6741](https://github.com/hashicorp/terraform/issues/6741)) + * provider/cloudstack: Improve ACL swapping ([#7315](https://github.com/hashicorp/terraform/issues/7315)) + * provider/cloudstack: Add project support to `cloudstack_network_acl` and `cloudstack_network_acl_rule` ([#7612](https://github.com/hashicorp/terraform/issues/7612)) + * provider/cloudstack: Add option to set `root_disk_size` to `cloudstack_instance` ([#7070](https://github.com/hashicorp/terraform/issues/7070)) + * provider/cloudstack: Do no longer force a new `cloudstack_instance` resource when updating `user_data` ([#7074](https://github.com/hashicorp/terraform/issues/7074)) + * provider/cloudstack: Add option to set `security_group_names` to `cloudstack_instance` ([#7240](https://github.com/hashicorp/terraform/issues/7240)) + * provider/cloudstack: Add option to set `affinity_group_names` to `cloudstack_instance` ([#7242](https://github.com/hashicorp/terraform/issues/7242)) + * provider/datadog: Add support for 'require full window' and 'locked' ([#6738](https://github.com/hashicorp/terraform/issues/6738)) + * provider/docker: Docker Container DNS Setting Enhancements ([#7392](https://github.com/hashicorp/terraform/issues/7392)) + * provider/docker: Add `destroy_grace_seconds` option to stop container before delete ([#7513](https://github.com/hashicorp/terraform/issues/7513)) + * provider/docker: Add `pull_trigger` option to `docker_image` to trigger pulling layers of a given image ([#7000](https://github.com/hashicorp/terraform/issues/7000)) + * provider/fastly: Add support for Cache Settings ([#6781](https://github.com/hashicorp/terraform/issues/6781)) + * provider/fastly: Add support for Service Request Settings on `fastly_service_v1` resources ([#6622](https://github.com/hashicorp/terraform/issues/6622)) + * provider/fastly: Add support for custom VCL configuration ([#6662](https://github.com/hashicorp/terraform/issues/6662)) + * provider/google: Support optional uuid naming for Instance Template ([#6604](https://github.com/hashicorp/terraform/issues/6604)) + * provider/openstack: Add support for client certificate authentication ([#6279](https://github.com/hashicorp/terraform/issues/6279)) + * provider/openstack: Allow Neutron-based Floating IP to target a specific tenant ([#6454](https://github.com/hashicorp/terraform/issues/6454)) + * provider/openstack: Enable DHCP By Default ([#6838](https://github.com/hashicorp/terraform/issues/6838)) + * provider/openstack: Implement fixed_ip on Neutron floating ip allocations ([#6837](https://github.com/hashicorp/terraform/issues/6837)) + * provider/openstack: Increase timeouts for image resize, subnets, and routers ([#6764](https://github.com/hashicorp/terraform/issues/6764)) + * provider/openstack: Add `lb_provider` argument to `lb_pool_v1` resource ([#6919](https://github.com/hashicorp/terraform/issues/6919)) + * provider/openstack: Enforce `ForceNew` on Instance Block Device ([#6921](https://github.com/hashicorp/terraform/issues/6921)) + * provider/openstack: Can now stop instances before destroying them 
([#7184](https://github.com/hashicorp/terraform/issues/7184)) + * provider/openstack: Disassociate LBaaS v1 Monitors from Pool Before Deletion ([#6997](https://github.com/hashicorp/terraform/issues/6997)) + * provider/powerdns: Add support for PowerDNS 4 API ([#7819](https://github.com/hashicorp/terraform/issues/7819)) + * provider/triton: add `triton_machine` `domain names` ([#7149](https://github.com/hashicorp/terraform/issues/7149)) + * provider/vsphere: Add support for `controller_type` to `vsphere_virtual_machine` ([#6785](https://github.com/hashicorp/terraform/issues/6785)) + * provider/vsphere: Fix bug with `vsphere_virtual_machine` wait for ip ([#6377](https://github.com/hashicorp/terraform/issues/6377)) + * provider/vsphere: Virtual machine update disk ([#6619](https://github.com/hashicorp/terraform/issues/6619)) + * provider/vsphere: `vsphere_virtual_machine` adding controller creation logic ([#6853](https://github.com/hashicorp/terraform/issues/6853)) + * provider/vsphere: `vsphere_virtual_machine` added support for `mac address` on `network_interface` ([#6966](https://github.com/hashicorp/terraform/issues/6966)) + * provider/vsphere: Enhanced `vsphere` logging capabilities ([#6893](https://github.com/hashicorp/terraform/issues/6893)) + * provider/vsphere: Add DiskEnableUUID option to `vsphere_virtual_machine` ([#7088](https://github.com/hashicorp/terraform/issues/7088)) + * provider/vsphere: Virtual Machine and File resources handle Read errors properley ([#7220](https://github.com/hashicorp/terraform/issues/7220)) + * provider/vsphere: set uuid as `vsphere_virtual_machine` output ([#4382](https://github.com/hashicorp/terraform/issues/4382)) + * provider/vsphere: Add support for `keep_on_remove` to `vsphere_virtual_machine` ([#7169](https://github.com/hashicorp/terraform/issues/7169)) + * provider/vsphere: Add support for additional `vsphere_virtial_machine` SCSI controller types ([#7525](https://github.com/hashicorp/terraform/issues/7525)) + * provisioner/file: File provisioners may now have file content set as an attribute ([#7561](https://github.com/hashicorp/terraform/issues/7561)) BUG FIXES: - * core: Correct the previous fix for a bug causing "attribute not found" messages during destroy, as it was insufficient [GH-6599] - * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules [GH-6833] - * core: Fix "diffs didn't match during apply" error for computed sets [GH-7205] - * core: Fix issue where `terraform init .` would truncate existing files [GH-7273] - * core: Don't compare diffs between maps with computed values [GH-7249] - * core: Don't copy existing files over themselves when fetching modules [GH-7273] - * core: Always increment the state serial number when upgrading the version [GH-7402] - * core: Fix a crash during eval when we're upgrading an empty state [GH-7403] - * core: Honor the `-state-out` flag when applying with a plan file [GH-7443] - * core: Fix a panic when a `terraform_remote_state` data source doesn't exist [GH-7464] - * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources [GH-7563] - * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected [GH-7530] - * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources [GH-6829] - * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation [GH-7227] - * provider/aws: Fix crash in 
`aws_elasticache_parameter_group` occuring following edits in the console [GH-6687] - * provider/aws: Fix issue reattaching a VPN gateway to a VPC [GH-6987] - * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations [GH-6512] - * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) [GH-6761] - * provider/aws: Increased lambda event mapping creation timeout [GH-7657] - * provider/aws: Handle spurious failures in resourceAwsSecurityGroupRuleRead [GH-7377] - * provider/aws: Make 'stage_name' required in api_gateway_deployment [GH-6797] - * provider/aws: Mark Lambda function as gone when it's gone [GH-6924] - * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs [GH-6592] - * provider/aws: Update Lambda functions on name change [GH-7081] - * provider/aws: Updating state when `aws_sns_topic_subscription` is missing [GH-6629] - * provider/aws: `aws_codedeploy_deployment_group` panic when setting `on_premises_instance_tag_filter` [GH-6617] - * provider/aws: `aws_db_instance` now defaults `publicly_accessible` to false [GH-7117] - * provider/aws: `aws_opsworks_application.app_source` SSH key is write-only [GH-6649] - * provider/aws: fix Elastic Beanstalk `cname_prefix` continual plans [GH-6653] - * provider/aws: Bundle IOPs and Allocated Storage update for DB Instances [GH-7203] - * provider/aws: Fix case when instanceId is absent in network interfaces [GH-6851] - * provider/aws: fix aws_security_group_rule refresh [GH-6730] - * provider/aws: Fix issue with Elastic Beanstalk and invalid settings [GH-7222] - * provider/aws: Fix issue where aws_app_cookie_stickiness_policy fails on destroy if LoadBalancer doesn't exist [GH-7166] - * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB [GH-7188] - * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` [GH-7219] - * provider/aws: Set Elastic Beanstalk stack name back to state [GH-7445] - * provider/aws: Allow recreation of VPC Peering Connection when state is rejected [GH-7466] - * provider/aws: Remove EFS File System from State when NotFound [GH-7437] - * provider/aws: `aws_customer_gateway` refreshing from state on deleted state [GH-7482] - * provider/aws: Retry finding `aws_route` after creating it [GH-7463] - * provider/aws: Refresh CloudWatch Group from state on 404 [GH-7576] - * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` [GH-7312] - * provider/aws: Safely get ELB values [GH-7585] - * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk [GH-6491] - * provider/aws: Bump rds_cluster timeout to 15 mins [GH-7604] - * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured [GH-7669] - * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` [GH-7698] - * provider/aws: Ignore IOPS on non io1 AWS root_block_device [GH-7783] - * provider/aws: Ignore missing ENI attachment when trying to detach ENI [GH-7185] - * provider/aws: Fix issue updating ElasticBeanstalk Environment templates [GH-7811] - * provider/aws: Restore Defaults to SQS Queues [GH-7818] - * provider/aws: Don't delete Lambda function from state on initial call of the Read func [GH-7829] - * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state [GH-7861] - * provider/aws: Fix 
aws_route53_record 0-2 migration [GH-7907] - * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` [GH-6766] - * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources [GH-6790] - * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` [GH-6768] - * provider/azurerm: Add support for storage container name validation [GH-6852] - * provider/azurerm: Remove storage containers and blobs when storage accounts are not found [GH-6855] - * provider/azurerm: `azurerm_virtual_machine` fix `additional_unattend_rm` Windows config option [GH-7105] - * provider/azurerm: Fix `azurerm_virtual_machine` windows_config [GH-7123] - * provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again [GH-7113] - * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding [GH-7307] - * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources [GH-7308] - * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion [GH-7584] - * provider/azurerm: catch `azurerm_template_deployment` erroring silently [GH-7644] - * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource [GH-7646] - * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names [GH-7674] - * provider/azurerm: `azurerm_virtual_machine` computer_name now Required [GH-7308] - * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew [GH-7650] - * provider/azurerm: Wait for `azurerm_storage_account` to be available [GH-7329] - * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 [GH-6969] - * provider/cloudstack: Fix using `cloudstack_network_acl` within a project [GH-6743] - * provider/cloudstack: Fix refresing `cloudstack_network_acl_rule` when the associated ACL is deleted [GH-7612] - * provider/cloudstack: Fix refresing `cloudstack_port_forward` when the associated IP address is no longer associated [GH-7612] - * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges [GH-7612] - * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region [GH-7044] - * provider/digitalocean: Reassign Floating IP when droplet changes [GH-7411] - * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` [GH-6689] - * provider/mysql: Specifying empty provider credentials no longer causes a panic [GH-7211] - * provider/openstack: Reassociate Floating IP on network changes [GH-6579] - * provider/openstack: Ensure CIDRs Are Lower Case [GH-6864] - * provider/openstack: Rebuild Instances On Network Changes [GH-6844] - * provider/openstack: Firewall rules are applied in the correct order [GH-7194] - * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups [GH-7468] - * provider/openstack: Fixing boot volumes interfering with block storage volumes list [GH-7649] - * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources [GH-6522] - * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` [GH-6635] - * provider/vsphere: adding a `vsphere_virtual_machine` migration [GH-7023] - * provider/vsphere: Don't require vsphere debug paths to be set 
[GH-7027] - * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources [GH-7275] - * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional [GH-7410] - * provider/vsphere: Refreshing devices list after adding a disk or cdrom controller [GH-7167] - * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete [GH-7206] - * provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks [GH-7804] - * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs [GH-7413] + * core: Correct the previous fix for a bug causing "attribute not found" messages during destroy, as it was insufficient ([#6599](https://github.com/hashicorp/terraform/issues/6599)) + * core: Fix issue causing syntax errors interpolating count attribute when value passed between modules ([#6833](https://github.com/hashicorp/terraform/issues/6833)) + * core: Fix "diffs didn't match during apply" error for computed sets ([#7205](https://github.com/hashicorp/terraform/issues/7205)) + * core: Fix issue where `terraform init .` would truncate existing files ([#7273](https://github.com/hashicorp/terraform/issues/7273)) + * core: Don't compare diffs between maps with computed values ([#7249](https://github.com/hashicorp/terraform/issues/7249)) + * core: Don't copy existing files over themselves when fetching modules ([#7273](https://github.com/hashicorp/terraform/issues/7273)) + * core: Always increment the state serial number when upgrading the version ([#7402](https://github.com/hashicorp/terraform/issues/7402)) + * core: Fix a crash during eval when we're upgrading an empty state ([#7403](https://github.com/hashicorp/terraform/issues/7403)) + * core: Honor the `-state-out` flag when applying with a plan file ([#7443](https://github.com/hashicorp/terraform/issues/7443)) + * core: Fix a panic when a `terraform_remote_state` data source doesn't exist ([#7464](https://github.com/hashicorp/terraform/issues/7464)) + * core: Fix issue where `ignore_changes` caused incorrect diffs on dependent resources ([#7563](https://github.com/hashicorp/terraform/issues/7563)) + * provider/aws: Manual changes to `aws_codedeploy_deployment_group` resources are now detected ([#7530](https://github.com/hashicorp/terraform/issues/7530)) + * provider/aws: Changing keys in `aws_dynamodb_table` correctly force new resources ([#6829](https://github.com/hashicorp/terraform/issues/6829)) + * provider/aws: Fix a bug where CloudWatch alarms are created repeatedly if the user does not have permission to use the the DescribeAlarms operation ([#7227](https://github.com/hashicorp/terraform/issues/7227)) + * provider/aws: Fix crash in `aws_elasticache_parameter_group` occuring following edits in the console ([#6687](https://github.com/hashicorp/terraform/issues/6687)) + * provider/aws: Fix issue reattaching a VPN gateway to a VPC ([#6987](https://github.com/hashicorp/terraform/issues/6987)) + * provider/aws: Fix issue with Root Block Devices and encrypted flag in Launch Configurations ([#6512](https://github.com/hashicorp/terraform/issues/6512)) + * provider/aws: If more ENIs are attached to `aws_instance`, the one w/ DeviceIndex `0` is always used in context of `aws_instance` (previously unpredictable) ([#6761](https://github.com/hashicorp/terraform/issues/6761)) + * provider/aws: Increased lambda event mapping creation timeout ([#7657](https://github.com/hashicorp/terraform/issues/7657)) + * provider/aws: Handle spurious 
failures in resourceAwsSecurityGroupRuleRead ([#7377](https://github.com/hashicorp/terraform/issues/7377)) + * provider/aws: Make 'stage_name' required in api_gateway_deployment ([#6797](https://github.com/hashicorp/terraform/issues/6797)) + * provider/aws: Mark Lambda function as gone when it's gone ([#6924](https://github.com/hashicorp/terraform/issues/6924)) + * provider/aws: Trim trailing `.` from `name` in `aws_route53_record` resources to prevent spurious diffs ([#6592](https://github.com/hashicorp/terraform/issues/6592)) + * provider/aws: Update Lambda functions on name change ([#7081](https://github.com/hashicorp/terraform/issues/7081)) + * provider/aws: Updating state when `aws_sns_topic_subscription` is missing ([#6629](https://github.com/hashicorp/terraform/issues/6629)) + * provider/aws: `aws_codedeploy_deployment_group` panic when setting `on_premises_instance_tag_filter` ([#6617](https://github.com/hashicorp/terraform/issues/6617)) + * provider/aws: `aws_db_instance` now defaults `publicly_accessible` to false ([#7117](https://github.com/hashicorp/terraform/issues/7117)) + * provider/aws: `aws_opsworks_application.app_source` SSH key is write-only ([#6649](https://github.com/hashicorp/terraform/issues/6649)) + * provider/aws: fix Elastic Beanstalk `cname_prefix` continual plans ([#6653](https://github.com/hashicorp/terraform/issues/6653)) + * provider/aws: Bundle IOPs and Allocated Storage update for DB Instances ([#7203](https://github.com/hashicorp/terraform/issues/7203)) + * provider/aws: Fix case when instanceId is absent in network interfaces ([#6851](https://github.com/hashicorp/terraform/issues/6851)) + * provider/aws: fix aws_security_group_rule refresh ([#6730](https://github.com/hashicorp/terraform/issues/6730)) + * provider/aws: Fix issue with Elastic Beanstalk and invalid settings ([#7222](https://github.com/hashicorp/terraform/issues/7222)) + * provider/aws: Fix issue where aws_app_cookie_stickiness_policy fails on destroy if LoadBalancer doesn't exist ([#7166](https://github.com/hashicorp/terraform/issues/7166)) + * provider/aws: Stickiness Policy exists, but isn't assigned to the ELB ([#7188](https://github.com/hashicorp/terraform/issues/7188)) + * provider/aws: Fix issue with `manage_bundler` on `aws_opsworks_layers` ([#7219](https://github.com/hashicorp/terraform/issues/7219)) + * provider/aws: Set Elastic Beanstalk stack name back to state ([#7445](https://github.com/hashicorp/terraform/issues/7445)) + * provider/aws: Allow recreation of VPC Peering Connection when state is rejected ([#7466](https://github.com/hashicorp/terraform/issues/7466)) + * provider/aws: Remove EFS File System from State when NotFound ([#7437](https://github.com/hashicorp/terraform/issues/7437)) + * provider/aws: `aws_customer_gateway` refreshing from state on deleted state ([#7482](https://github.com/hashicorp/terraform/issues/7482)) + * provider/aws: Retry finding `aws_route` after creating it ([#7463](https://github.com/hashicorp/terraform/issues/7463)) + * provider/aws: Refresh CloudWatch Group from state on 404 ([#7576](https://github.com/hashicorp/terraform/issues/7576)) + * provider/aws: Adding in additional retry logic due to latency with delete of `db_option_group` ([#7312](https://github.com/hashicorp/terraform/issues/7312)) + * provider/aws: Safely get ELB values ([#7585](https://github.com/hashicorp/terraform/issues/7585)) + * provider/aws: Fix bug for recurring plans on ec2-classic and vpc in beanstalk ([#6491](https://github.com/hashicorp/terraform/issues/6491)) + * 
provider/aws: Bump rds_cluster timeout to 15 mins ([#7604](https://github.com/hashicorp/terraform/issues/7604)) + * provider/aws: Fix ICMP fields in `aws_network_acl_rule` to allow ICMP code 0 (echo reply) to be configured ([#7669](https://github.com/hashicorp/terraform/issues/7669)) + * provider/aws: Fix bug with Updating `aws_autoscaling_group` `enabled_metrics` ([#7698](https://github.com/hashicorp/terraform/issues/7698)) + * provider/aws: Ignore IOPS on non io1 AWS root_block_device ([#7783](https://github.com/hashicorp/terraform/issues/7783)) + * provider/aws: Ignore missing ENI attachment when trying to detach ENI ([#7185](https://github.com/hashicorp/terraform/issues/7185)) + * provider/aws: Fix issue updating ElasticBeanstalk Environment templates ([#7811](https://github.com/hashicorp/terraform/issues/7811)) + * provider/aws: Restore Defaults to SQS Queues ([#7818](https://github.com/hashicorp/terraform/issues/7818)) + * provider/aws: Don't delete Lambda function from state on initial call of the Read func ([#7829](https://github.com/hashicorp/terraform/issues/7829)) + * provider/aws: `aws_vpn_gateway` should be removed from state when in deleted state ([#7861](https://github.com/hashicorp/terraform/issues/7861)) + * provider/aws: Fix aws_route53_record 0-2 migration ([#7907](https://github.com/hashicorp/terraform/issues/7907)) + * provider/azurerm: Fixes terraform crash when using SSH keys with `azurerm_virtual_machine` ([#6766](https://github.com/hashicorp/terraform/issues/6766)) + * provider/azurerm: Fix a bug causing 'diffs do not match' on `azurerm_network_interface` resources ([#6790](https://github.com/hashicorp/terraform/issues/6790)) + * provider/azurerm: Normalizes `availability_set_id` casing to avoid spurious diffs in `azurerm_virtual_machine` ([#6768](https://github.com/hashicorp/terraform/issues/6768)) + * provider/azurerm: Add support for storage container name validation ([#6852](https://github.com/hashicorp/terraform/issues/6852)) + * provider/azurerm: Remove storage containers and blobs when storage accounts are not found ([#6855](https://github.com/hashicorp/terraform/issues/6855)) + * provider/azurerm: `azurerm_virtual_machine` fix `additional_unattend_rm` Windows config option ([#7105](https://github.com/hashicorp/terraform/issues/7105)) + * provider/azurerm: Fix `azurerm_virtual_machine` windows_config ([#7123](https://github.com/hashicorp/terraform/issues/7123)) + * provider/azurerm: `azurerm_dns_cname_record` can create CNAME records again ([#7113](https://github.com/hashicorp/terraform/issues/7113)) + * provider/azurerm: `azurerm_network_security_group` now waits for the provisioning state of `ready` before proceeding ([#7307](https://github.com/hashicorp/terraform/issues/7307)) + * provider/azurerm: `computer_name` is now required for `azurerm_virtual_machine` resources ([#7308](https://github.com/hashicorp/terraform/issues/7308)) + * provider/azurerm: destroy azurerm_virtual_machine OS Disk VHD on deletion ([#7584](https://github.com/hashicorp/terraform/issues/7584)) + * provider/azurerm: catch `azurerm_template_deployment` erroring silently ([#7644](https://github.com/hashicorp/terraform/issues/7644)) + * provider/azurerm: changing the name of an `azurerm_virtual_machine` now forces a new resource ([#7646](https://github.com/hashicorp/terraform/issues/7646)) + * provider/azurerm: azurerm_storage_account now returns storage keys value instead of their names ([#7674](https://github.com/hashicorp/terraform/issues/7674)) + * provider/azurerm: 
`azurerm_virtual_machine` computer_name now Required ([#7308](https://github.com/hashicorp/terraform/issues/7308)) + * provider/azurerm: Change of `availability_set_id` on `azurerm_virtual_machine` should ForceNew ([#7650](https://github.com/hashicorp/terraform/issues/7650)) + * provider/azurerm: Wait for `azurerm_storage_account` to be available ([#7329](https://github.com/hashicorp/terraform/issues/7329)) + * provider/cloudflare: Fix issue upgrading CloudFlare Records created before v0.6.15 ([#6969](https://github.com/hashicorp/terraform/issues/6969)) + * provider/cloudstack: Fix using `cloudstack_network_acl` within a project ([#6743](https://github.com/hashicorp/terraform/issues/6743)) + * provider/cloudstack: Fix refresing `cloudstack_network_acl_rule` when the associated ACL is deleted ([#7612](https://github.com/hashicorp/terraform/issues/7612)) + * provider/cloudstack: Fix refresing `cloudstack_port_forward` when the associated IP address is no longer associated ([#7612](https://github.com/hashicorp/terraform/issues/7612)) + * provider/cloudstack: Fix creating `cloudstack_network` with offerings that do not support specifying IP ranges ([#7612](https://github.com/hashicorp/terraform/issues/7612)) + * provider/digitalocean: Stop `digitocean_droplet` forcing new resource on uppercase region ([#7044](https://github.com/hashicorp/terraform/issues/7044)) + * provider/digitalocean: Reassign Floating IP when droplet changes ([#7411](https://github.com/hashicorp/terraform/issues/7411)) + * provider/google: Fix a bug causing an error attempting to delete an already-deleted `google_compute_disk` ([#6689](https://github.com/hashicorp/terraform/issues/6689)) + * provider/mysql: Specifying empty provider credentials no longer causes a panic ([#7211](https://github.com/hashicorp/terraform/issues/7211)) + * provider/openstack: Reassociate Floating IP on network changes ([#6579](https://github.com/hashicorp/terraform/issues/6579)) + * provider/openstack: Ensure CIDRs Are Lower Case ([#6864](https://github.com/hashicorp/terraform/issues/6864)) + * provider/openstack: Rebuild Instances On Network Changes ([#6844](https://github.com/hashicorp/terraform/issues/6844)) + * provider/openstack: Firewall rules are applied in the correct order ([#7194](https://github.com/hashicorp/terraform/issues/7194)) + * provider/openstack: Fix Security Group EOF Error when Adding / Removing Multiple Groups ([#7468](https://github.com/hashicorp/terraform/issues/7468)) + * provider/openstack: Fixing boot volumes interfering with block storage volumes list ([#7649](https://github.com/hashicorp/terraform/issues/7649)) + * provider/vsphere: `gateway` and `ipv6_gateway` are now read from `vsphere_virtual_machine` resources ([#6522](https://github.com/hashicorp/terraform/issues/6522)) + * provider/vsphere: `ipv*_gateway` parameters won't force a new `vsphere_virtual_machine` ([#6635](https://github.com/hashicorp/terraform/issues/6635)) + * provider/vsphere: adding a `vsphere_virtual_machine` migration ([#7023](https://github.com/hashicorp/terraform/issues/7023)) + * provider/vsphere: Don't require vsphere debug paths to be set ([#7027](https://github.com/hashicorp/terraform/issues/7027)) + * provider/vsphere: Fix bug where `enable_disk_uuid` was not set on `vsphere_virtual_machine` resources ([#7275](https://github.com/hashicorp/terraform/issues/7275)) + * provider/vsphere: Make `vsphere_virtual_machine` `product_key` optional ([#7410](https://github.com/hashicorp/terraform/issues/7410)) + * provider/vsphere: Refreshing 
devices list after adding a disk or cdrom controller ([#7167](https://github.com/hashicorp/terraform/issues/7167)) + * provider/vsphere: `vsphere_virtual_machine` no longer has to be powered on to delete ([#7206](https://github.com/hashicorp/terraform/issues/7206)) + * provider/vSphere: Fixes the hasBootableVmdk flag when attaching multiple disks ([#7804](https://github.com/hashicorp/terraform/issues/7804)) + * provisioner/remote-exec: Properly seed random script paths so they are not deterministic across runs ([#7413](https://github.com/hashicorp/terraform/issues/7413)) ## 0.6.16 (May 9, 2016) diff --git a/terraform/version.go b/terraform/version.go index d753ff8e7..f42820214 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -12,7 +12,7 @@ const Version = "0.7.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "dev" +const VersionPrerelease = "" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a diff --git a/website/config.rb b/website/config.rb index e025500a0..bf97974b0 100644 --- a/website/config.rb +++ b/website/config.rb @@ -2,6 +2,6 @@ set :base_url, "https://www.terraform.io/" activate :hashicorp do |h| h.name = "terraform" - h.version = "0.6.16" + h.version = "0.7.0" h.github_slug = "hashicorp/terraform" end From fa64ac7815e937001a8153dd5a95445e497b6c06 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 2 Aug 2016 18:17:05 +0000 Subject: [PATCH 0504/1238] release: clean up after v0.7.0 --- CHANGELOG.md | 8 ++++++++ terraform/version.go | 4 ++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a811a785..2549a3b5d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,11 @@ +## 0.7.1 (Unreleased) + +FEATURES: + +IMPROVEMENTS: + +BUG FIXES: + ## 0.7.0 (August 2, 2016) BACKWARDS INCOMPATIBILITIES / NOTES: diff --git a/terraform/version.go b/terraform/version.go index f42820214..ae5f1f7cb 100644 --- a/terraform/version.go +++ b/terraform/version.go @@ -7,12 +7,12 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.7.0" +const Version = "0.7.1" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" // SemVersion is an instance of version.Version. This has the secondary // benefit of verifying during tests and init time that our version is a From 9306e29651f6eceab7cda59506491f91ef66c77b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 3 Aug 2016 05:06:44 +1000 Subject: [PATCH 0505/1238] docs/website: Linking the downloads page to the upgrade guide for 0.7 (#7913) --- website/source/downloads.html.erb | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb index 87179f12e..ec2f7b5f4 100644 --- a/website/source/downloads.html.erb +++ b/website/source/downloads.html.erb @@ -32,8 +32,7 @@ description: |- Checkout the v<%= latest_version %> CHANGELOG for information on the latest release.

    - Note: Terraform now ships as a single binary. When upgrading from Terraform < 0.7.0 - you will need to remove the old terraform-* plugins from your installation path. + Note: if you are upgrading to 0.7 please see the upgrade guide.

    From 79f2f229b1bfd5a8a54aa592086a45ea3d4643e2 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 2 Aug 2016 14:13:25 -0500 Subject: [PATCH 0506/1238] website: add listmap anchor to upgrade guide linked from blog post --- website/source/upgrade-guides/0-7.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/upgrade-guides/0-7.html.markdown b/website/source/upgrade-guides/0-7.html.markdown index 6ba96193d..a29ac6621 100644 --- a/website/source/upgrade-guides/0-7.html.markdown +++ b/website/source/upgrade-guides/0-7.html.markdown @@ -134,6 +134,8 @@ resource "aws_instance" "example" { } ``` + + ## Migrating to native lists and maps Terraform 0.7 now supports lists and maps as first-class constructs. Although the patterns commonly used in previous versions still work (excepting any compatibility notes), there are now patterns with cleaner syntax available. From ab30753498e4b4930f9836d3e88ba290cab3231c Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 2 Aug 2016 16:03:02 -0500 Subject: [PATCH 0507/1238] website: update community people section As the page says: > Over time, faces may appear from this list as contributors come and > go. Let's reflect the list of peoople working on Terraform nowadays! :) --- website/source/community.html.erb | 48 +++++++++++++++++-------------- 1 file changed, 26 insertions(+), 22 deletions(-) diff --git a/website/source/community.html.erb b/website/source/community.html.erb index cfb931604..e5a0e3f48 100644 --- a/website/source/community.html.erb +++ b/website/source/community.html.erb @@ -54,36 +54,40 @@ disappear from this list as contributors come and go.
    - +
    -

    Armon Dadgar (@armon)

    +

    Paul Hinze (@phinze)

    - Armon Dadgar is a creator of Terraform. He created valuable sections - of the core and helps maintain providers as well. Armon is also the - creator of - Consul, - Serf, - Statsite, and - Bloomd. -

    + Paul Hinze is the Project Lead of Terraform. He helps organize the team + of HashiCorp employees and community members that work on Terraform + day-to-day. He works on Terraform's core and providers. +

    -
    +
    - +
    -

    Jack Pearkes (@pearkes)

    +

    Clint Shryock (@catsby)

    - Jack Pearkes is a creator of Terraform. He created and maintains - most of the providers and documentation. - He is also a core committer to - Packer and - Consul - while also being an employee of - HashiCorp. + Clint Shryock is a HashiCorp Engineer working on Terraform. He is the + primary maintainer of the AWS provider, and works across all providers. + Client was also the primary author of the Fastly provider.

    -
    + -
    +
    + +
    +

    James Nugent (@jen20)

    +

+ James Nugent is a HashiCorp Engineer working on Terraform. He is one of the + principal developers working in Terraform's core, though he can also be + found working on providers from time to time as well.

    +
    +
    + +
    From 70f43ac8dca72295da0aef229b20e7fc89f25d92 Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Tue, 2 Aug 2016 20:00:02 -0500 Subject: [PATCH 0508/1238] website: fix community page typo --- website/source/community.html.erb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/community.html.erb b/website/source/community.html.erb index e5a0e3f48..72cf6eb96 100644 --- a/website/source/community.html.erb +++ b/website/source/community.html.erb @@ -72,7 +72,7 @@ disappear from this list as contributors come and go.

    Clint Shryock is a HashiCorp Engineer working on Terraform. He is the primary maintainer of the AWS provider, and works across all providers. - Client was also the primary author of the Fastly provider. + Clint is also the primary author of the Fastly provider.

    From 6c35953994fdf7295fef8c171b54fc352d25ad3d Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 3 Aug 2016 13:18:36 +1000 Subject: [PATCH 0509/1238] website/docs: Adding a list of resources that are available to import now --- .../source/docs/import/importability.html.md | 130 ++++++++++++++++++ 1 file changed, 130 insertions(+) diff --git a/website/source/docs/import/importability.html.md b/website/source/docs/import/importability.html.md index 324e4a7e8..5b2893009 100644 --- a/website/source/docs/import/importability.html.md +++ b/website/source/docs/import/importability.html.md @@ -20,3 +20,133 @@ would be grateful. To make a resource importable, please see the [plugin documentation on writing a resource](/docs/plugins/provider.html). + +# Currently Available to Import + +# AWS + +* aws_api_gateway_account +* aws_api_gateway_key +* aws_autoscaling_group +* aws_cloudfront_distribution +* aws_cloudfront_origin_access_identity +* aws_cloudtrail +* aws_cloudwatch_event_rule +* aws_cloudwatch_log_group +* aws_cloudwatch_metric_alarm +* aws_customer_gateway +* aws_db_instance +* aws_db_option_group +* aws_db_parameter_group +* aws_db_security_group +* aws_db_subnet_group +* aws_dynamodb_table +* aws_ebs_volume +* aws_ecr_repository +* aws_efs_file_system +* aws_efs_mount_target +* aws_eip +* aws_elastic_beanstalk_application +* aws_elastic_beanstalk_environment +* aws_elasticache_parameter_group +* aws_elasticache_subnet_group +* aws_elb +* aws_flow_log +* aws_glacier_vault +* aws_iam_account_password_policy +* aws_iam_group +* aws_iam_saml_provider +* aws_iam_user +* aws_instance +* aws_internet_gateway +* aws_key_pair +* aws_kms_key +* aws_lambda_function +* aws_launch_configuration +* aws_nat_gateway +* aws_network_acl +* aws_network_interface +* aws_placement_group +* aws_rds_cluster +* aws_rds_cluster_instance +* aws_rds_cluster_parameter_group +* aws_redshift_cluster +* aws_redshift_parameter_group +* aws_redshift_security_group +* aws_redshift_subnet_group +* aws_route53_delegation_set +* aws_route53_health_check +* aws_route53_zone +* aws_route_table +* aws_security_group +* aws_ses_receipt_filter +* aws_ses_receipt_rule_set +* aws_simpledb_domain +* aws_sns_topic +* aws_sns_topic_subscription +* aws_sqs_queue +* aws_subnet +* aws_vpc +* aws_vpc_dhcp_options +* aws_vpc_endpoint +* aws_vpc_peering_connection +* aws_vpn_connection +* aws_vpn_gateway + + +# Azure + +* azurerm_availability_set +* azurerm_dns_zone +* azurerm_local_network_gateway +* azurerm_network_security_group +* azurerm_network_security_rule +* azurerm_public_ip +* azurerm_resource_group +* azurerm_sql_firewall_rule +* azurerm_storage_account +* azurerm_virtual_network + +# DigitalOcean + +* digitalocean_domain +* digitalocean_droplet +* digitalocean_floating_ip +* digitalocean_ssh_key +* digitalocean_tag +* digitalocean_volume + +# Fastly + +* fastly_service_v1 + +# OpenStack + +* openstack_blockstorage_volume_v1 +* openstack_blockstorage_volume_v2 +* openstack_compute_floatingip_v2 +* openstack_compute_keypair_v2 +* openstack_compute_secgroup_v2 +* openstack_compute_servergroup_v2 +* openstack_fw_firewall_v1 +* openstack_fw_policy_v1 +* openstack_fw_rule_v1 +* openstack_lb_member_v1 +* openstack_lb_monitor_v1 +* openstack_lb_pool_v1 +* openstack_lb_vip_v1 +* openstack_networking_floatingip_v2 +* openstack_networking_network_v2 +* openstack_networking_port_v2 +* openstack_networking_secgroup_rule_v2 +* openstack_networking_secgroup_v2 +* openstack_networking_subnet_v2 + + +# Triton + +* triton_firewall_rule +* 
triton_key +* triton_machine +* triton_vlan + From 9a4c0c24555f1a5dd1127c3b833bc43aeac16fce Mon Sep 17 00:00:00 2001 From: stack72 Date: Wed, 3 Aug 2016 13:24:54 +1000 Subject: [PATCH 0510/1238] docs/aws: Clarifying that needs the ARN of the IAM Role not the ID --- website/source/docs/providers/aws/r/ecs_service.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/ecs_service.html.markdown b/website/source/docs/providers/aws/r/ecs_service.html.markdown index 33c329046..4fd8d6646 100644 --- a/website/source/docs/providers/aws/r/ecs_service.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_service.html.markdown @@ -41,7 +41,7 @@ The following arguments are supported: * `task_definition` - (Required) The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service. * `desired_count` - (Required) The number of instances of the task definition to place and keep running * `cluster` - (Optional) ARN of an ECS cluster -* `iam_role` - (Optional) IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service. +* `iam_role` - (Optional) The ARN of IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service. * `deployment_maximum_percent` - (Optional) The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment. * `deployment_minimum_healthy_percent` - (Optional) The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment. * `load_balancer` - (Optional) A load balancer block. Load balancers documented below. From f335c5fa9173afd5355cddd1dd4f6a3f8998a5a8 Mon Sep 17 00:00:00 2001 From: Jason Myers Date: Wed, 3 Aug 2016 07:51:41 -0500 Subject: [PATCH 0511/1238] Update ecs_task_definition.html.markdown Add a note about the recently added task_role_arn argument. --- .../docs/providers/aws/r/ecs_task_definition.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown b/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown index 31cd1f1ae..4d4109464 100644 --- a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown +++ b/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown @@ -52,6 +52,7 @@ The following arguments are supported: * `family` - (Required) The family, unique name for your task definition. * `container_definitions` - (Required) A list of container definitions in JSON format. See [AWS docs](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/create-task-definition.html) for syntax. Note, you only need the containerDefinitions array, not the parent hash including the family and volumes keys. +* `task_role_arn` - (Optional) The ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services. * `volume` - (Optional) A volume block. Volumes documented below. 
Volumes support the following: From 8e6dd97ebafc8272c91d1630a82a9e2636b1d993 Mon Sep 17 00:00:00 2001 From: Dan Webb Date: Wed, 3 Aug 2016 14:07:57 +0100 Subject: [PATCH 0512/1238] Fix terraform_remote_state documentation Fixing minor typo to match documentation on https://www.terraform.io/docs/providers/terraform/d/remote_state.html --- website/source/docs/providers/terraform/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/terraform/index.html.markdown b/website/source/docs/providers/terraform/index.html.markdown index aba805349..815c13622 100644 --- a/website/source/docs/providers/terraform/index.html.markdown +++ b/website/source/docs/providers/terraform/index.html.markdown @@ -20,7 +20,7 @@ Use the navigation to the left to read about the available data sources. data "terraform_remote_state" "vpc" { backend = "atlas" config { - path = "hashicorp/vpc-prod" + name = "hashicorp/vpc-prod" } } From 581f23dfa0ba8596c55ca60857d5731e08fb7c37 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Wed, 3 Aug 2016 07:15:26 -0700 Subject: [PATCH 0513/1238] docs: Tidy up importabable resources list --- website/source/docs/import/importability.html.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/website/source/docs/import/importability.html.md b/website/source/docs/import/importability.html.md index 5b2893009..f0a297370 100644 --- a/website/source/docs/import/importability.html.md +++ b/website/source/docs/import/importability.html.md @@ -21,9 +21,9 @@ would be grateful. To make a resource importable, please see the [plugin documentation on writing a resource](/docs/plugins/provider.html). -# Currently Available to Import +## Currently Available to Import -# AWS +### AWS * aws_api_gateway_account * aws_api_gateway_key @@ -94,7 +94,7 @@ To make a resource importable, please see the * aws_vpn_gateway -# Azure +### Azure (Resource Manager) * azurerm_availability_set * azurerm_dns_zone @@ -107,7 +107,7 @@ To make a resource importable, please see the * azurerm_storage_account * azurerm_virtual_network -# DigitalOcean +### DigitalOcean * digitalocean_domain * digitalocean_droplet @@ -116,11 +116,11 @@ To make a resource importable, please see the * digitalocean_tag * digitalocean_volume -# Fastly +### Fastly * fastly_service_v1 -# OpenStack +### OpenStack * openstack_blockstorage_volume_v1 * openstack_blockstorage_volume_v2 @@ -143,7 +143,7 @@ To make a resource importable, please see the * openstack_networking_subnet_v2 -# Triton +### Triton * triton_firewall_rule * triton_key From cb5062d237e97029b9c11bd6c69014a55d2485d1 Mon Sep 17 00:00:00 2001 From: grayaii Date: Wed, 3 Aug 2016 16:39:28 +0200 Subject: [PATCH 0514/1238] Increase timeout yet again for aws elasticsearch resource We create hundreds of AWS Elasticsearch resources over the last few months and we get occasional timeout failures from AWS. This will PR is to increase the timeout once again. I did it before: https://github.com/hashicorp/terraform/pull/5910/files But we've seen enough timeouts from AWS on this resource that increasing the timeout seems like the only solution. 
--- builtin/providers/aws/resource_aws_elasticsearch_domain.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go index f2e7d8c7d..35bffc89a 100644 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain.go +++ b/builtin/providers/aws/resource_aws_elasticsearch_domain.go @@ -207,7 +207,7 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface d.SetId(*out.DomainStatus.ARN) log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be created", d.Id()) - err = resource.Retry(30*time.Minute, func() *resource.RetryError { + err = resource.Retry(60*time.Minute, func() *resource.RetryError { out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ DomainName: aws.String(d.Get("domain_name").(string)), }) @@ -403,7 +403,7 @@ func resourceAwsElasticSearchDomainDelete(d *schema.ResourceData, meta interface } log.Printf("[DEBUG] Waiting for ElasticSearch domain %q to be deleted", d.Get("domain_name").(string)) - err = resource.Retry(30*time.Minute, func() *resource.RetryError { + err = resource.Retry(60*time.Minute, func() *resource.RetryError { out, err := conn.DescribeElasticsearchDomain(&elasticsearch.DescribeElasticsearchDomainInput{ DomainName: aws.String(d.Get("domain_name").(string)), }) From c584287cf9f6078d4723e69bfd3f475cf3758d44 Mon Sep 17 00:00:00 2001 From: Kyle West Date: Wed, 3 Aug 2016 10:54:25 -0400 Subject: [PATCH 0515/1238] fix awkward wording of atlas token generation Too many "to"s (and other prepositions) made this hard to skim. --- website/source/intro/getting-started/remote.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/getting-started/remote.html.markdown b/website/source/intro/getting-started/remote.html.markdown index 95ab71fb1..125eb00dc 100644 --- a/website/source/intro/getting-started/remote.html.markdown +++ b/website/source/intro/getting-started/remote.html.markdown @@ -31,7 +31,7 @@ or you can follow the outlined steps below. First, If you don't have an Atlas account, you can [create an account here](https://atlas.hashicorp.com/account/new?utm_source=oss&utm_medium=getting-started&utm_campaign=terraform). -In order for the Terraform CLI to gain access to your Atlas account you're going to need to generate an access key. From the main menu, select your username in the left side navigation menu to access your profile. Under `Personal`, click on the `Tokens` tab and hit generate. +The Terraform CLI uses your `Atlas Token` to securely communicate with your Atlas account. To generate a token: from the main menu, select your username in the left side navigation menu to access your profile. Under `Personal`, click on the `Tokens` tab and hit `Generate`. 
For the purposes of this tutorial you can use this token by exporting it to your local shell session: From 8d9a9ddebe1093c56f84ac35acf0713b4fcdcd21 Mon Sep 17 00:00:00 2001 From: Cecchi MacNaughton Date: Wed, 3 Aug 2016 10:48:00 -0700 Subject: [PATCH 0516/1238] Correct typo in `length()` documentation (#7947) --- website/source/docs/configuration/interpolation.html.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index f7cd5108e..22b8c04d1 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -165,8 +165,7 @@ The supported built-in functions are: * `keys(map)` - Returns a lexically sorted list of the map keys. - * `length(list)` - Returns a number of members in a given list, map, or string. - or a number of characters in a given string. + * `length(list)` - Returns a number of members in a given list or map, or a number of characters in a given string. * `${length(split(",", "a,b,c"))}` = 3 * `${length("a,b,c")}` = 5 * `${length(map("key", "val"))}` = 1 From ec7ff802b697ee147e4f29c4f4aea2ca8266b5e9 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Thu, 4 Aug 2016 03:45:14 +0900 Subject: [PATCH 0517/1238] Fix link to the remote state link post 0.7.x. (#7946) * Fix link to the remote state link post 0.7.x. Signed-off-by: Krzysztof Wilczynski * Correct "resource" to "data source". Signed-off-by: Krzysztof Wilczynski --- website/source/docs/state/remote/index.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/state/remote/index.html.md b/website/source/docs/state/remote/index.html.md index 9a1c627d7..519900246 100644 --- a/website/source/docs/state/remote/index.html.md +++ b/website/source/docs/state/remote/index.html.md @@ -47,7 +47,7 @@ teams to run their own infrastructure. As a more specific example with AWS: you can expose things such as VPC IDs, subnets, NAT instance IDs, etc. through remote state and have other Terraform states consume that. -For example usage see the [terraform_remote_state](/docs/providers/terraform/r/remote_state.html) resource. +For example usage see the [terraform_remote_state](/docs/providers/terraform/d/remote_state.html) data source. ## Locking and Teamwork From 96d88f171453119eeb9a133e58064358e96f0db7 Mon Sep 17 00:00:00 2001 From: Rasty Turek Date: Wed, 3 Aug 2016 13:44:52 -0700 Subject: [PATCH 0518/1238] typo --- .../docs/providers/google/r/compute_instance.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index 960ce1db0..baa14c98c 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -141,7 +141,7 @@ The `network_interface` block supports: * `access_config` - (Optional) Access configurations, i.e. IPs via which this instance can be accessed via the Internet. Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will - not work unless you are running Terraform can send traffic tothe instance's + not work unless you are running Terraform can send traffic to the instance's network (e.g. via tunnel or because it is running on another cloud instance on that network). 
This block can be repeated multiple times. Structure documented below. From a0fc4276ba65e6513becaa168f925b226025daf8 Mon Sep 17 00:00:00 2001 From: Sam Stavinoha Date: Thu, 4 Aug 2016 17:47:12 +1100 Subject: [PATCH 0519/1238] remove duplicate 'recipients' argument (#7968) --- .../source/docs/providers/aws/r/ses_receipt_rule.html.markdown | 1 - 1 file changed, 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown index 5362d3737..180cc560d 100644 --- a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown +++ b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown @@ -41,7 +41,6 @@ The following arguments are supported: * `after` - (Optional) The name of the rule to place this rule after * `enabled` - (Optional) If true, the rule will be enabled * `recipients` - (Optional) A list of email addresses -* `recipients` - (Optional) A list of email addresses * `scan_enabled` - (Optional) If true, incoming emails will be scanned for spam and viruses * `tls_policy` - (Optional) Require or Optional * `add_header_action` - (Optional) A list of Add Header Action blocks. Documented below. From 98d84407117162a8bc3f4d9d15252498854ee52f Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 4 Aug 2016 09:40:45 +0100 Subject: [PATCH 0520/1238] docs/aws: Fix ordering of data sources (#7970) --- website/source/layouts/aws.erb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index c4765aafe..00c9f140f 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -16,12 +16,12 @@ > aws_ami - > - aws_ecs_container_definition - > aws_availability_zones + > + aws_ecs_container_definition + > aws_iam_policy_document From bf83b435e11be2d48d62595391849fd01121cee4 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 11:16:35 -0400 Subject: [PATCH 0521/1238] numeric variables aren't always interpreted as str If we have a number value in our config variables, format it as a string, and send it with the HCL=true flag just in case. Also use %g for for float encoding, as the output is a generally a little friendlier. 
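To see why `%g` reads better than `%f` here, a minimal standalone Go snippet (illustrative only, not part of the patch) comparing the two verbs:

```go
package main

import "fmt"

func main() {
	// %f always prints six decimal places, so whole numbers pick up noise.
	fmt.Printf("%f %f\n", 5.0, 0.25) // 5.000000 0.250000

	// %g uses the shortest representation that still round-trips the value,
	// which is friendlier when the number ends up pushed as a string variable.
	fmt.Printf("%g %g\n", 5.0, 0.25) // 5 0.25
}
```

The diff below swaps exactly that verb in `encodeFloat`.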
--- command/hcl_printer.go | 2 +- command/push.go | 26 ++++++++------------------ 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/command/hcl_printer.go b/command/hcl_printer.go index 1537fff14..dbf1797f5 100644 --- a/command/hcl_printer.go +++ b/command/hcl_printer.go @@ -107,7 +107,7 @@ func (e *encodeState) encodeInt(i interface{}) error { } func (e *encodeState) encodeFloat(f interface{}) error { - _, err := fmt.Fprintf(e, "%f", f) + _, err := fmt.Fprintf(e, "%g", f) return err } diff --git a/command/push.go b/command/push.go index d7845ddb4..0a6290594 100644 --- a/command/push.go +++ b/command/push.go @@ -327,25 +327,15 @@ RANGE: case string: tfv.Value = v - case []interface{}: - hcl, err = encodeHCL(v) - if err != nil { - break RANGE - } - - tfv.Value = string(hcl) - tfv.IsHCL = true - - case map[string]interface{}: - hcl, err = encodeHCL(v) - if err != nil { - break RANGE - } - - tfv.Value = string(hcl) - tfv.IsHCL = true default: - err = fmt.Errorf("unknown type %T for variable %s", v, k) + // everything that's not a string is now HCL encoded + hcl, err = encodeHCL(v) + if err != nil { + break RANGE + } + + tfv.Value = string(hcl) + tfv.IsHCL = true } tfVars = append(tfVars, tfv) From bb84dc75b764c295bfd481eba62956027e7ed10e Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 11:20:11 -0400 Subject: [PATCH 0522/1238] Fix improper wait group usage in test --- builtin/providers/template/datasource_template_file_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/template/datasource_template_file_test.go b/builtin/providers/template/datasource_template_file_test.go index 7b13f69e6..43dda582c 100644 --- a/builtin/providers/template/datasource_template_file_test.go +++ b/builtin/providers/template/datasource_template_file_test.go @@ -122,8 +122,8 @@ func TestValidateVarsAttribute(t *testing.T) { func TestTemplateSharedMemoryRace(t *testing.T) { var wg sync.WaitGroup for i := 0; i < 100; i++ { - go func(wg *sync.WaitGroup, t *testing.T, i int) { - wg.Add(1) + wg.Add(1) + go func(t *testing.T, i int) { out, err := execute("don't panic!", map[string]interface{}{}) if err != nil { t.Fatalf("err: %s", err) @@ -132,7 +132,7 @@ func TestTemplateSharedMemoryRace(t *testing.T) { t.Fatalf("bad output: %s", out) } wg.Done() - }(&wg, t, i) + }(t, i) } wg.Wait() } From 403d97183ef3fe1a8b50cc393c28981b3485b7dc Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 11:23:59 -0400 Subject: [PATCH 0523/1238] update github.com/mitchellh/copystructure Patched a panic where copystructure tries to set an unexported struct field. --- .../mitchellh/copystructure/copystructure.go | 12 +++++------- vendor/vendor.json | 4 +++- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go index 7ef83d8aa..98c5144e2 100644 --- a/vendor/github.com/mitchellh/copystructure/copystructure.go +++ b/vendor/github.com/mitchellh/copystructure/copystructure.go @@ -95,7 +95,9 @@ func (w *walker) Exit(l reflectwalk.Location) error { if v.IsValid() { s := w.cs[len(w.cs)-1] sf := reflect.Indirect(s).FieldByName(f.Name) - sf.Set(v) + if sf.CanSet() { + sf.Set(v) + } } case reflectwalk.WalkLoc: // Clear out the slices for GC @@ -111,16 +113,12 @@ func (w *walker) Map(m reflect.Value) error { return nil } - // Get the type for the map - t := m.Type() - mapType := reflect.MapOf(t.Key(), t.Elem()) - // Create the map. 
If the map itself is nil, then just make a nil map var newMap reflect.Value if m.IsNil() { - newMap = reflect.Indirect(reflect.New(mapType)) + newMap = reflect.Indirect(reflect.New(m.Type())) } else { - newMap = reflect.MakeMap(reflect.MapOf(t.Key(), t.Elem())) + newMap = reflect.MakeMap(m.Type()) } w.cs = append(w.cs, newMap) diff --git a/vendor/vendor.json b/vendor/vendor.json index 9931fb9d8..ee31a419f 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1410,8 +1410,10 @@ "revision": "8631ce90f28644f54aeedcb3e389a85174e067d1" }, { + "checksumSHA1": "86nE93o1VIND0Doe8PuhCXnhUx0=", "path": "github.com/mitchellh/copystructure", - "revision": "80adcec1955ee4e97af357c30dee61aadcc02c10" + "revision": "cdac8253d00f2ecf0a0b19fbff173a9a72de4f82", + "revisionTime": "2016-08-04T03:23:30Z" }, { "path": "github.com/mitchellh/go-homedir", From 13ea0a01c61416795b0497cee2b9dfba30438e6a Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Thu, 4 Aug 2016 19:19:43 +0200 Subject: [PATCH 0524/1238] Added IP ranges from Fastly --- .../providers/fastly/data_source_ip_ranges.go | 68 +++++++++++++++++++ builtin/providers/fastly/provider.go | 3 + 2 files changed, 71 insertions(+) create mode 100644 builtin/providers/fastly/data_source_ip_ranges.go diff --git a/builtin/providers/fastly/data_source_ip_ranges.go b/builtin/providers/fastly/data_source_ip_ranges.go new file mode 100644 index 000000000..bc01cd232 --- /dev/null +++ b/builtin/providers/fastly/data_source_ip_ranges.go @@ -0,0 +1,68 @@ +package fastly + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "sort" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/schema" +) + +type dataSourceFastlyIPRangesResult struct { + Addresses []string +} + +func dataSourceFastlyIPRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourceFastlyIPRangesRead, + + Schema: map[string]*schema.Schema{ + "blocks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + } +} + +func dataSourceFastlyIPRangesRead(d *schema.ResourceData, meta interface{}) error { + + conn := cleanhttp.DefaultClient() + + log.Printf("[DEBUG] Reading IP ranges") + d.SetId(time.Now().UTC().String()) + + res, err := conn.Get("https://api.fastly.com/public-ip-list") + + if err != nil { + return fmt.Errorf("Error listing IP ranges: %s", err) + } + + defer res.Body.Close() + + data, err := ioutil.ReadAll(res.Body) + + if err != nil { + return fmt.Errorf("Error reading response body: %s", err) + } + + result := new(dataSourceFastlyIPRangesResult) + + if err := json.Unmarshal(data, result); err != nil { + return fmt.Errorf("Error parsing result: %s", err) + } + + sort.Strings(result.Addresses) + + if err := d.Set("blocks", result.Addresses); err != nil { + return fmt.Errorf("Error setting ip ranges: %s", err) + } + + return nil + +} diff --git a/builtin/providers/fastly/provider.go b/builtin/providers/fastly/provider.go index f68c6705b..eee4be8e8 100644 --- a/builtin/providers/fastly/provider.go +++ b/builtin/providers/fastly/provider.go @@ -18,6 +18,9 @@ func Provider() terraform.ResourceProvider { Description: "Fastly API Key from https://app.fastly.com/#account", }, }, + DataSourcesMap: map[string]*schema.Resource{ + "fastly_ip_ranges": dataSourceFastlyIPRanges(), + }, ResourcesMap: map[string]*schema.Resource{ "fastly_service_v1": resourceServiceV1(), }, From 8accef2c27e14d90658730b82d52cfe13875e27b Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Thu, 4 Aug 2016 
19:20:14 +0200 Subject: [PATCH 0525/1238] Added IP ranges from AWS --- .../aws/data_source_aws_ip_ranges.go | 128 ++++++++++++++++++ builtin/providers/aws/provider.go | 1 + 2 files changed, 129 insertions(+) create mode 100644 builtin/providers/aws/data_source_aws_ip_ranges.go diff --git a/builtin/providers/aws/data_source_aws_ip_ranges.go b/builtin/providers/aws/data_source_aws_ip_ranges.go new file mode 100644 index 000000000..c530d981d --- /dev/null +++ b/builtin/providers/aws/data_source_aws_ip_ranges.go @@ -0,0 +1,128 @@ +package aws + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "sort" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/schema" +) + +type dataSourceAwsIPRangesResult struct { + CreateDate string + Prefixes []dataSourceAwsIPRangesPrefix + SyncToken string +} + +type dataSourceAwsIPRangesPrefix struct { + IpPrefix string `json:"ip_prefix"` + Region string + Service string +} + +func dataSourceAwsIPRanges() *schema.Resource { + return &schema.Resource{ + Read: dataSourceAwsIPRangesRead, + + Schema: map[string]*schema.Schema{ + "blocks": &schema.Schema{ + Type: schema.TypeList, + Computed: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "create_date": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "regions": &schema.Schema{ + Type: schema.TypeSet, + Elem: &schema.Schema{Type: schema.TypeString}, + Optional: true, + }, + "services": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "sync_token": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { + + conn := cleanhttp.DefaultClient() + + log.Printf("[DEBUG] Reading IP ranges") + d.SetId(time.Now().UTC().String()) + + res, err := conn.Get("https://ip-ranges.amazonaws.com/ip-ranges.json") + + if err != nil { + return fmt.Errorf("Error listing IP ranges: %s", err) + } + + defer res.Body.Close() + + data, err := ioutil.ReadAll(res.Body) + + if err != nil { + return fmt.Errorf("Error reading response body: %s", err) + } + + result := new(dataSourceAwsIPRangesResult) + + if err := json.Unmarshal(data, result); err != nil { + return fmt.Errorf("Error parsing result: %s", err) + } + + if err := d.Set("create_date", result.CreateDate); err != nil { + return fmt.Errorf("Error setting create date: %s", err) + } + + if err := d.Set("sync_token", result.SyncToken); err != nil { + return fmt.Errorf("Error setting sync token: %s", err) + } + + var ( + regions = d.Get("regions").(*schema.Set) + services = d.Get("services").(*schema.Set) + noRegionFilter = regions.Len() == 0 + noServiceFilter = services.Len() == 0 + prefixes []string + ) + + for _, e := range result.Prefixes { + + var ( + matchRegion = noRegionFilter || regions.Contains(strings.ToLower(e.Region)) + matchService = noServiceFilter || services.Contains(strings.ToLower(e.Service)) + ) + + if matchRegion && matchService { + prefixes = append(prefixes, e.IpPrefix) + } + + } + + if len(prefixes) == 0 { + log.Printf("[WARN] No ip ranges result from filters") + } + + sort.Strings(prefixes) + + if err := d.Set("blocks", prefixes); err != nil { + return fmt.Errorf("Error setting ip ranges: %s", err) + } + + return nil + +} diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 69e264dd9..e15931225 100644 --- a/builtin/providers/aws/provider.go +++ 
b/builtin/providers/aws/provider.go @@ -114,6 +114,7 @@ func Provider() terraform.ResourceProvider { "aws_ami": dataSourceAwsAmi(), "aws_availability_zones": dataSourceAwsAvailabilityZones(), "aws_iam_policy_document": dataSourceAwsIamPolicyDocument(), + "aws_ip_ranges": dataSourceAwsIPRanges(), "aws_s3_bucket_object": dataSourceAwsS3BucketObject(), "aws_ecs_container_definition": dataSourceAwsEcsContainerDefinition(), }, From 582e3bd883d737cbbf13e98565c71195c18217fe Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Thu, 4 Aug 2016 21:26:43 +0200 Subject: [PATCH 0526/1238] provider/aws: guard against missing digestSha 7956 (#7966) --- .../aws/data_source_aws_ecs_container_definition.go | 5 ++++- .../aws/data_source_aws_ecs_container_definition_test.go | 1 + 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/data_source_aws_ecs_container_definition.go b/builtin/providers/aws/data_source_aws_ecs_container_definition.go index ecc1b20b7..91750abe8 100644 --- a/builtin/providers/aws/data_source_aws_ecs_container_definition.go +++ b/builtin/providers/aws/data_source_aws_ecs_container_definition.go @@ -77,7 +77,10 @@ func dataSourceAwsEcsContainerDefinitionRead(d *schema.ResourceData, meta interf d.SetId(fmt.Sprintf("%s/%s", aws.StringValue(taskDefinition.TaskDefinitionArn), d.Get("container_name").(string))) d.Set("image", aws.StringValue(def.Image)) - d.Set("image_digest", strings.Split(aws.StringValue(def.Image), ":")[1]) + image := aws.StringValue(def.Image) + if strings.Contains(image, ":") { + d.Set("image_digest", strings.Split(image, ":")[1]) + } d.Set("cpu", aws.Int64Value(def.Cpu)) d.Set("memory", aws.Int64Value(def.Memory)) d.Set("disable_networking", aws.BoolValue(def.DisableNetworking)) diff --git a/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go index 461808550..85f3d1fb7 100644 --- a/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go +++ b/builtin/providers/aws/data_source_aws_ecs_container_definition_test.go @@ -15,6 +15,7 @@ func TestAccAWSEcsDataSource_ecsContainerDefinition(t *testing.T) { Config: testAccCheckAwsEcsContainerDefinitionDataSourceConfig, Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "image", "mongo:latest"), + resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "image_digest", "latest"), resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "memory", "128"), resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "cpu", "128"), resource.TestCheckResourceAttr("data.aws_ecs_container_definition.mongo", "environment.SECRET", "KEY"), From f140724ee6bd74ac90aa7f4236afa67b2714e271 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 05:27:53 +1000 Subject: [PATCH 0527/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2549a3b5d..4b28e20d9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES: IMPROVEMENTS: BUG FIXES: + * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] ## 0.7.0 (August 2, 2016) From ed771058220f3895ec1615801a5528f296e1979e Mon Sep 17 00:00:00 2001 From: Dan Allegood Date: Thu, 4 Aug 2016 12:29:02 -0700 Subject: [PATCH 0528/1238] Improved SCSI controller handling (#7908) Govmomi tries to use the 7th slot in a scsi controller, which is not 
allowed. This patch will appropriately select the slot to attach a disk to as well as determine if a scsi controller is full. --- .../resource_vsphere_virtual_machine.go | 56 +++++++++++++++ .../resource_vsphere_virtual_machine_test.go | 70 +++++++++++++++++++ 2 files changed, 126 insertions(+) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 444e10877..d8e907212 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -1198,6 +1198,12 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d } if err != nil || controller == nil { + // Check if max number of scsi controller are already used + diskControllers := getSCSIControllers(devices) + if len(diskControllers) >= 4 { + return fmt.Errorf("[ERROR] Maximum number of SCSI controllers created") + } + log.Printf("[DEBUG] Couldn't find a %v controller. Creating one..", controller_type) var c types.BaseVirtualDevice @@ -1268,6 +1274,14 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d log.Printf("[DEBUG] addHardDisk - diskPath: %v", diskPath) disk := devices.CreateDisk(controller, datastore.Reference(), diskPath) + if strings.Contains(controller_type, "scsi") { + unitNumber, err := getNextUnitNumber(devices, controller) + if err != nil { + return err + } + *disk.UnitNumber = unitNumber + } + existing := devices.SelectByBackingInfo(disk.Backing) log.Printf("[DEBUG] disk: %#v\n", disk) @@ -1300,6 +1314,44 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d } } +func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { + // get virtual scsi controllers of all supported types + var scsiControllers []*types.VirtualController + for _, device := range vmDevices { + devType := vmDevices.Type(device) + switch devType { + case "scsi", "lsilogic", "buslogic", "pvscsi", "lsilogic-sas": + if c, ok := device.(types.BaseVirtualController); ok { + scsiControllers = append(scsiControllers, c.GetVirtualController()) + } + } + } + return scsiControllers +} + +func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { + key := c.GetVirtualController().Key + + var unitNumbers [16]bool + unitNumbers[7] = true + + for _, device := range devices { + d := device.GetVirtualDevice() + + if d.ControllerKey == key { + if d.UnitNumber != nil { + unitNumbers[*d.UnitNumber] = true + } + } + } + for i, taken := range unitNumbers { + if !taken { + return int32(i), nil + } + } + return -1, fmt.Errorf("[ERROR] getNextUnitNumber - controller is full") +} + // addCdrom adds a new virtual cdrom drive to the VirtualMachine and attaches an image (ISO) to it from a datastore path. 
func addCdrom(vm *object.VirtualMachine, datastore, path string) error { devices, err := vm.Device(context.TODO()) @@ -1902,6 +1954,10 @@ func (vm *virtualMachine) setupVirtualMachine(c *govmomi.Client) error { err = addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) if err != nil { + err2 := addHardDisk(newVM, vm.hardDisks[i].size, vm.hardDisks[i].iops, vm.hardDisks[i].initType, datastore, diskPath, vm.hardDisks[i].controller) + if err2 != nil { + return err2 + } return err } } diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index 56f2db226..5a1dafe24 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -328,6 +328,76 @@ func TestAccVSphereVirtualMachine_client_debug(t *testing.T) { }) } +const testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity = ` +resource "vsphere_virtual_machine" "scsiCapacity" { + name = "terraform-test" +` + testAccTemplateBasicBody + ` + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "one" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "two" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "three" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "four" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "five" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "six" + } + disk { + size = 1 + controller_type = "scsi-paravirtual" + name = "seven" + } +} +` + +func TestAccVSphereVirtualMachine_diskSCSICapacity(t *testing.T) { + var vm virtualMachine + basic_vars := setupTemplateBasicBodyVars() + config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity) + + vmName := "vsphere_virtual_machine.scsiCapacity" + + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "8"}.testCheckFuncBasic() + + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_diskSCSICapacity) + log.Printf("[DEBUG] template config= %s", config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + ), + }, + }, + }) +} + const testAccCheckVSphereVirtualMachineConfig_initType = ` resource "vsphere_virtual_machine" "thin" { name = "terraform-test" From 2b743915fd91050c6412f6c6e602ece46a51571d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 05:29:57 +1000 Subject: [PATCH 0529/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4b28e20d9..3e68f910e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,7 @@ FEATURES: IMPROVEMENTS: + * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 895383ac92036373f151f9ffb022c9aa50035edd Mon Sep 17 
00:00:00 2001 From: Davide Agnello Date: Wed, 3 Aug 2016 11:53:59 -0700 Subject: [PATCH 0530/1238] vSphere file resource: extending functionality to copy files in vSphere * Enables copy of files within vSphere * Can copy files between different datacenters and datastores * Update can move uploaded or copied files between datacenters and datastores * Preserves original functionality for backward compatibility --- .../vsphere/resource_vsphere_file.go | 167 ++++++++++++++---- .../vsphere/resource_vsphere_file_test.go | 153 +++++++++++++++- .../providers/vsphere/r/file.html.markdown | 37 +++- 3 files changed, 308 insertions(+), 49 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_file.go b/builtin/providers/vsphere/resource_vsphere_file.go index 55d3d6cbb..c8afe05d9 100644 --- a/builtin/providers/vsphere/resource_vsphere_file.go +++ b/builtin/providers/vsphere/resource_vsphere_file.go @@ -3,6 +3,7 @@ package vsphere import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/schema" "github.com/vmware/govmomi" @@ -13,10 +14,14 @@ import ( ) type file struct { - datacenter string - datastore string - sourceFile string - destinationFile string + sourceDatacenter string + datacenter string + sourceDatastore string + datastore string + sourceFile string + destinationFile string + createDirectories bool + copyFile bool } func resourceVSphereFile() *schema.Resource { @@ -30,10 +35,20 @@ func resourceVSphereFile() *schema.Resource { "datacenter": { Type: schema.TypeString, Optional: true, + }, + + "source_datacenter": { + Type: schema.TypeString, + Optional: true, ForceNew: true, }, "datastore": { + Type: schema.TypeString, + Required: true, + }, + + "source_datastore": { Type: schema.TypeString, Optional: true, ForceNew: true, @@ -49,6 +64,11 @@ func resourceVSphereFile() *schema.Resource { Type: schema.TypeString, Required: true, }, + + "create_directories": { + Type: schema.TypeBool, + Optional: true, + }, }, } } @@ -60,10 +80,20 @@ func resourceVSphereFileCreate(d *schema.ResourceData, meta interface{}) error { f := file{} + if v, ok := d.GetOk("source_datacenter"); ok { + f.sourceDatacenter = v.(string) + f.copyFile = true + } + if v, ok := d.GetOk("datacenter"); ok { f.datacenter = v.(string) } + if v, ok := d.GetOk("source_datastore"); ok { + f.sourceDatastore = v.(string) + f.copyFile = true + } + if v, ok := d.GetOk("datastore"); ok { f.datastore = v.(string) } else { @@ -82,6 +112,10 @@ func resourceVSphereFileCreate(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("destination_file argument is required") } + if v, ok := d.GetOk("create_directories"); ok { + f.createDirectories = v.(bool) + } + err := createFile(client, &f) if err != nil { return err @@ -108,16 +142,53 @@ func createFile(client *govmomi.Client, f *file) error { return fmt.Errorf("error %s", err) } - dsurl, err := ds.URL(context.TODO(), dc, f.destinationFile) - if err != nil { - return err + if f.copyFile { + // Copying file from withing vSphere + source_dc, err := finder.Datacenter(context.TODO(), f.sourceDatacenter) + if err != nil { + return fmt.Errorf("error %s", err) + } + finder = finder.SetDatacenter(dc) + + source_ds, err := getDatastore(finder, f.sourceDatastore) + if err != nil { + return fmt.Errorf("error %s", err) + } + + fm := object.NewFileManager(client.Client) + if f.createDirectories { + directoryPathIndex := strings.LastIndex(f.destinationFile, "/") + path := f.destinationFile[0:directoryPathIndex] + err = fm.MakeDirectory(context.TODO(), ds.Path(path), dc, 
true) + if err != nil { + return fmt.Errorf("error %s", err) + } + } + task, err := fm.CopyDatastoreFile(context.TODO(), source_ds.Path(f.sourceFile), source_dc, ds.Path(f.destinationFile), dc, true) + + if err != nil { + return fmt.Errorf("error %s", err) + } + + _, err = task.WaitForResult(context.TODO(), nil) + if err != nil { + return fmt.Errorf("error %s", err) + } + + } else { + // Uploading file to vSphere + dsurl, err := ds.URL(context.TODO(), dc, f.destinationFile) + if err != nil { + return fmt.Errorf("error %s", err) + } + + p := soap.DefaultUpload + err = client.Client.UploadFile(f.sourceFile, dsurl, &p) + if err != nil { + return fmt.Errorf("error %s", err) + } } - p := soap.DefaultUpload - err = client.Client.UploadFile(f.sourceFile, dsurl, &p) - if err != nil { - return fmt.Errorf("error %s", err) - } return nil } @@ -126,10 +197,18 @@ func resourceVSphereFileRead(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] reading file: %#v", d) f := file{} + if v, ok := d.GetOk("source_datacenter"); ok { + f.sourceDatacenter = v.(string) + } + if v, ok := d.GetOk("datacenter"); ok { f.datacenter = v.(string) } + if v, ok := d.GetOk("source_datastore"); ok { + f.sourceDatastore = v.(string) + } + if v, ok := d.GetOk("datastore"); ok { f.datastore = v.(string) } else { @@ -179,57 +258,69 @@ func resourceVSphereFileRead(d *schema.ResourceData, meta interface{}) error { func resourceVSphereFileUpdate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] updating file: %#v", d) - if d.HasChange("destination_file") { - oldDestinationFile, newDestinationFile := d.GetChange("destination_file") - f := file{} - if v, ok := d.GetOk("datacenter"); ok { - f.datacenter = v.(string) - } - - if v, ok := d.GetOk("datastore"); ok { - f.datastore = v.(string) + if d.HasChange("destination_file") || d.HasChange("datacenter") || d.HasChange("datastore") { + // File needs to be moved, get old and new destination changes + var oldDataceneter, newDatacenter, oldDatastore, newDatastore, oldDestinationFile, newDestinationFile string + if d.HasChange("datacenter") { + tmpOldDataceneter, tmpNewDatacenter := d.GetChange("datacenter") + oldDataceneter = tmpOldDataceneter.(string) + newDatacenter = tmpNewDatacenter.(string) } else { - return fmt.Errorf("datastore argument is required") + if v, ok := d.GetOk("datacenter"); ok { + oldDataceneter = v.(string) + newDatacenter = oldDataceneter + } } - - if v, ok := d.GetOk("source_file"); ok { - f.sourceFile = v.(string) + if d.HasChange("datastore") { + tmpOldDatastore, tmpNewDatastore := d.GetChange("datastore") + oldDatastore = tmpOldDatastore.(string) + newDatastore = tmpNewDatastore.(string) } else { - return fmt.Errorf("source_file argument is required") + oldDatastore = d.Get("datastore").(string) + newDatastore = oldDatastore } - - if v, ok := d.GetOk("destination_file"); ok { - f.destinationFile = v.(string) + if d.HasChange("destination_file") { + tmpOldDestinationFile, tmpNewDestinationFile := d.GetChange("destination_file") + oldDestinationFile = tmpOldDestinationFile.(string) + newDestinationFile = tmpNewDestinationFile.(string) } else { - return fmt.Errorf("destination_file argument is required") + oldDestinationFile = d.Get("destination_file").(string) + newDestinationFile = oldDestinationFile } + // Get old and new dataceter and datastore client := meta.(*govmomi.Client) - dc, err := getDatacenter(client, f.datacenter) + dcOld, err := getDatacenter(client, oldDataceneter) + if err != nil { + return err + } + dcNew, 
err := getDatacenter(client, newDatacenter) if err != nil { return err } - finder := find.NewFinder(client.Client, true) - finder = finder.SetDatacenter(dc) - - ds, err := getDatastore(finder, f.datastore) + finder = finder.SetDatacenter(dcOld) + dsOld, err := getDatastore(finder, oldDatastore) + if err != nil { + return fmt.Errorf("error %s", err) + } + finder = finder.SetDatacenter(dcNew) + dsNew, err := getDatastore(finder, newDatastore) if err != nil { return fmt.Errorf("error %s", err) } + // Move file between old/new dataceter, datastore and path (destination_file) fm := object.NewFileManager(client.Client) - task, err := fm.MoveDatastoreFile(context.TODO(), ds.Path(oldDestinationFile.(string)), dc, ds.Path(newDestinationFile.(string)), dc, true) + task, err := fm.MoveDatastoreFile(context.TODO(), dsOld.Path(oldDestinationFile), dcOld, dsNew.Path(newDestinationFile), dcNew, true) if err != nil { return err } - _, err = task.WaitForResult(context.TODO(), nil) if err != nil { return err } - } return nil diff --git a/builtin/providers/vsphere/resource_vsphere_file_test.go b/builtin/providers/vsphere/resource_vsphere_file_test.go index 81520b0cb..7e5aa44e7 100644 --- a/builtin/providers/vsphere/resource_vsphere_file_test.go +++ b/builtin/providers/vsphere/resource_vsphere_file_test.go @@ -14,7 +14,7 @@ import ( "golang.org/x/net/context" ) -// Basic file creation +// Basic file creation (upload to vSphere) func TestAccVSphereFile_basic(t *testing.T) { testVmdkFileData := []byte("# Disk DescriptorFile\n") testVmdkFile := "/tmp/tf_test.vmdk" @@ -55,6 +55,59 @@ func TestAccVSphereFile_basic(t *testing.T) { os.Remove(testVmdkFile) } +// Basic file copy within vSphere +func TestAccVSphereFile_basicUploadAndCopy(t *testing.T) { + testVmdkFileData := []byte("# Disk DescriptorFile\n") + sourceFile := "/tmp/tf_test.vmdk" + uploadResourceName := "myfileupload" + copyResourceName := "myfilecopy" + sourceDatacenter := os.Getenv("VSPHERE_DATACENTER") + datacenter := sourceDatacenter + sourceDatastore := os.Getenv("VSPHERE_DATASTORE") + datastore := sourceDatastore + destinationFile := "tf_file_test.vmdk" + sourceFileCopy := "${vsphere_file." 
+ uploadResourceName + ".destination_file}" + destinationFileCopy := "tf_file_test_copy.vmdk" + + err := ioutil.WriteFile(sourceFile, testVmdkFileData, 0644) + if err != nil { + t.Errorf("error %s", err) + return + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereFileDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf( + testAccCheckVSphereFileCopyConfig, + uploadResourceName, + datacenter, + datastore, + sourceFile, + destinationFile, + copyResourceName, + datacenter, + datacenter, + datastore, + datastore, + sourceFileCopy, + destinationFileCopy, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), + testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, true), + resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), + resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileCopy), + ), + }, + }, + }) + os.Remove(sourceFile) +} + // file creation followed by a rename of file (update) func TestAccVSphereFile_renamePostCreation(t *testing.T) { testVmdkFileData := []byte("# Disk DescriptorFile\n") @@ -67,7 +120,7 @@ func TestAccVSphereFile_renamePostCreation(t *testing.T) { datacenter := os.Getenv("VSPHERE_DATACENTER") datastore := os.Getenv("VSPHERE_DATASTORE") - testMethod := "basic" + testMethod := "create_upgrade" resourceName := "vsphere_file." + testMethod destinationFile := "tf_test_file.vmdk" destinationFileMoved := "tf_test_file_moved.vmdk" @@ -76,7 +129,7 @@ func TestAccVSphereFile_renamePostCreation(t *testing.T) { resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, - CheckDestroy: testAccCheckVSphereFolderDestroy, + CheckDestroy: testAccCheckVSphereFileDestroy, Steps: []resource.TestStep{ { Config: fmt.Sprintf( @@ -113,6 +166,84 @@ func TestAccVSphereFile_renamePostCreation(t *testing.T) { os.Remove(testVmdkFile) } +// file upload, then copy, finally the copy is renamed (moved) (update) +func TestAccVSphereFile_uploadAndCopyAndUpdate(t *testing.T) { + testVmdkFileData := []byte("# Disk DescriptorFile\n") + sourceFile := "/tmp/tf_test.vmdk" + uploadResourceName := "myfileupload" + copyResourceName := "myfilecopy" + sourceDatacenter := os.Getenv("VSPHERE_DATACENTER") + datacenter := sourceDatacenter + sourceDatastore := os.Getenv("VSPHERE_DATASTORE") + datastore := sourceDatastore + destinationFile := "tf_file_test.vmdk" + sourceFileCopy := "${vsphere_file." 
+ uploadResourceName + ".destination_file}" + destinationFileCopy := "tf_file_test_copy.vmdk" + destinationFileMoved := "tf_test_file_moved.vmdk" + + err := ioutil.WriteFile(sourceFile, testVmdkFileData, 0644) + if err != nil { + t.Errorf("error %s", err) + return + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereFileDestroy, + Steps: []resource.TestStep{ + { + Config: fmt.Sprintf( + testAccCheckVSphereFileCopyConfig, + uploadResourceName, + datacenter, + datastore, + sourceFile, + destinationFile, + copyResourceName, + datacenter, + datacenter, + datastore, + datastore, + sourceFileCopy, + destinationFileCopy, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), + testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, true), + resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), + resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileCopy), + ), + }, + { + Config: fmt.Sprintf( + testAccCheckVSphereFileCopyConfig, + uploadResourceName, + datacenter, + datastore, + sourceFile, + destinationFile, + copyResourceName, + datacenter, + datacenter, + datastore, + datastore, + sourceFileCopy, + destinationFileMoved, + ), + Check: resource.ComposeTestCheckFunc( + testAccCheckVSphereFileExists("vsphere_file."+uploadResourceName, destinationFile, true), + testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileCopy, false), + testAccCheckVSphereFileExists("vsphere_file."+copyResourceName, destinationFileMoved, true), + resource.TestCheckResourceAttr("vsphere_file."+uploadResourceName, "destination_file", destinationFile), + resource.TestCheckResourceAttr("vsphere_file."+copyResourceName, "destination_file", destinationFileMoved), + ), + }, + }, + }) + os.Remove(sourceFile) +} + func testAccCheckVSphereFileDestroy(s *terraform.State) error { client := testAccProvider.Meta().(*govmomi.Client) finder := find.NewFinder(client.Client, true) @@ -201,3 +332,19 @@ resource "vsphere_file" "%s" { destination_file = "%s" } ` +const testAccCheckVSphereFileCopyConfig = ` +resource "vsphere_file" "%s" { + datacenter = "%s" + datastore = "%s" + source_file = "%s" + destination_file = "%s" +} +resource "vsphere_file" "%s" { + source_datacenter = "%s" + datacenter = "%s" + source_datastore = "%s" + datastore = "%s" + source_file = "%s" + destination_file = "%s" +} +` diff --git a/website/source/docs/providers/vsphere/r/file.html.markdown b/website/source/docs/providers/vsphere/r/file.html.markdown index 443aa3046..9bd4c4b17 100644 --- a/website/source/docs/providers/vsphere/r/file.html.markdown +++ b/website/source/docs/providers/vsphere/r/file.html.markdown @@ -3,28 +3,49 @@ layout: "vsphere" page_title: "VMware vSphere: vsphere_file" sidebar_current: "docs-vsphere-resource-file" description: |- - Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. vmdk disks) from the Terraform host machine to a remote vSphere. + Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. vmdk disks) from the Terraform host machine to a remote vSphere or copy fields withing vSphere. --- # vsphere\_file -Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. 
vmdk disks) from the Terraform host machine to a remote vSphere. +Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. vmdk disks) from the Terraform host machine to a remote vSphere. The file resource can also be used to copy files within vSphere. Files can be copied between Datacenters and/or Datastores. -## Example Usage +Updates to file resources will handle moving a file to a new destination (datacenter and/or datastore and/or destination_file). If any source parameter (e.g. `source_datastore`, `source_datacenter` or `source_file`) are changed, this results in a new resource (new file uploaded or copied and old one being deleted). +## Example Usages + +**Upload file to vSphere:** ``` -resource "vsphere_file" "ubuntu_disk" { +resource "vsphere_file" "ubuntu_disk_upload" { + datacenter = "my_datacenter" datastore = "local" source_file = "/home/ubuntu/my_disks/custom_ubuntu.vmdk" destination_file = "/my_path/disks/custom_ubuntu.vmdk" } ``` +**Copy file within vSphere:** +``` +resource "vsphere_file" "ubuntu_disk_copy" { + source_datacenter = "my_datacenter" + datacenter = "my_datacenter" + source_datastore = "local" + datastore = "local" + source_file = "/my_path/disks/custom_ubuntu.vmdk" + destination_file = "/my_path/custom_ubuntu_id.vmdk" +} +``` + ## Argument Reference +If `source_datacenter` and `source_datastore` are not provided, the file resource will upload the file from Terraform host. If either `source_datacenter` or `source_datastore` are provided, the file resource will copy from within specified locations in vSphere. + The following arguments are supported: -* `source_file` - (Required) The path to the file on the Terraform host that will be uploaded to vSphere. -* `destination_file` - (Required) The path to where the file should be uploaded to on vSphere. -* `datacenter` - (Optional) The name of a Datacenter in which the file will be created/uploaded to. -* `datastore` - (Required) The name of the Datastore in which to create/upload the file to. +* `source_file` - (Required) The path to the file being uploaded from the Terraform host to vSphere or copied within vSphere. +* `destination_file` - (Required) The path to where the file should be uploaded or copied to on vSphere. +* `source_datacenter` - (Optional) The name of a Datacenter in which the file will be copied from. +* `datacenter` - (Optional) The name of a Datacenter in which the file will be uploaded to. +* `source_datastore` - (Optional) The name of the Datastore in which file will be copied from. +* `datastore` - (Required) The name of the Datastore in which to upload the file to. +* `create_directories` - (Optional) Create directories in `destination_file` path parameter if any missing for copy operation. *Note: Directories are not deleted on destroy operation. 
\ No newline at end of file From f75c3a9459b08da760aba3462aad338e3edf503c Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 4 Aug 2016 16:56:18 -0400 Subject: [PATCH 0531/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3e68f910e..4ca08820e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ IMPROVEMENTS: BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] + * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From 9d0faa2cae80fee57674a575d53baef2910e1089 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 17:17:41 -0400 Subject: [PATCH 0532/1238] Strip off extra \n in hcl encoded variables They don't change the value, but they do currently end up in the UI --- command/hcl_printer.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/command/hcl_printer.go b/command/hcl_printer.go index dbf1797f5..7947bb815 100644 --- a/command/hcl_printer.go +++ b/command/hcl_printer.go @@ -35,7 +35,13 @@ func encodeHCL(i interface{}) ([]byte, error) { // now strip that first assignment off eq := regexp.MustCompile(`=\s+`).FindIndex(hcl) - return hcl[eq[1]:], nil + // strip of an extra \n if it's there + end := len(hcl) + if hcl[end-1] == '\n' { + end -= 1 + } + + return hcl[eq[1]:end], nil } type encodeState struct { From d5fbb5f5c02e5048ce26c9e6db46fa46778d7f7b Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 17:19:02 -0400 Subject: [PATCH 0533/1238] Modify the tfvars test to also use a cli var Modify the test to demonstrate where cli vars were being lost because they weren't interpreted as strings. --- command/push_test.go | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/command/push_test.go b/command/push_test.go index 60270169e..8c6cfa923 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -389,6 +389,8 @@ func TestPush_tfvars(t *testing.T) { args := []string{ "-var-file", path + "/terraform.tfvars", "-vcs=false", + "-var", + "bar=1", path, } if code := c.Run(args); code != 0 { @@ -412,12 +414,19 @@ func TestPush_tfvars(t *testing.T) { //now check TFVars tfvars := pushTFVars() + // update bar to match cli value + for i, v := range tfvars { + if v.Key == "bar" { + tfvars[i].Value = "1" + tfvars[i].IsHCL = true + } + } for i, expected := range tfvars { got := client.UpsertOptions.TFVars[i] if got != expected { t.Logf("%2d expected: %#v", i, expected) - t.Logf(" got: %#v", got) + t.Fatalf(" got: %#v", got) } } } @@ -589,9 +598,8 @@ func pushTFVars() []atlas.TFVar { {"baz", `{ A = "a" interp = "${file("t.txt")}" -} -`, true}, - {"fob", `["a", "quotes \"in\" quotes"]` + "\n", true}, +}`, true}, + {"fob", `["a", "quotes \"in\" quotes"]`, true}, {"foo", "bar", false}, } } From 67bd4f29e0aac83a6e310ffaea551f511f844df1 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 4 Aug 2016 16:48:31 -0400 Subject: [PATCH 0534/1238] Override atlas variables even if they aren't local Some Atlas usage patterns expect to be able to override a variable set in Atlas, even if it's not seen in the local context. This allows overwriting a variable that is returned from atlas, and sends it back. Also use a unique sential value in the context where we have variables from atlas. This way atals variables aren't combined with the local variables, and we don't do something like inadvertantly change the type, double encode/escape, etc. 
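To make the idea above concrete before the diff: below is a deliberately simplified, self-contained sketch of the merge rules this message describes — Atlas variables are always sent back, a sentinel marks remote-only variables locally so the Input walk never prompts for them, and CLI overrides win even for variables that were never defined locally. All names and types here are illustrative assumptions, not the actual `command/push.go` code; the real implementation is in the hunks that follow.

```go
// Hypothetical, simplified sketch of the sentinel-based merge described above.
package main

import "fmt"

// A recognizable sentinel marks variables that only exist remotely (in Atlas),
// so local processing never mistakes them for real local values.
const remoteSentinel = "REMOTE_ONLY_SENTINEL_78AC153C"

func mergeVars(local, remote, cliOverrides map[string]string) map[string]string {
	upload := make(map[string]string)

	// Start with every remote variable; remote values are sent back untouched
	// unless something below replaces them.
	for k, v := range remote {
		upload[k] = v
	}

	// Local values win for anything genuinely defined locally, but the sentinel
	// itself must never be uploaded — it exists only to suppress prompting.
	for k, v := range local {
		if v == remoteSentinel {
			continue
		}
		upload[k] = v
	}

	// CLI overrides win over both, even for variables never seen locally.
	for k, v := range cliOverrides {
		upload[k] = v
	}
	return upload
}

func main() {
	local := map[string]string{"foo": "bar", "remote": remoteSentinel}
	remote := map[string]string{"remote": "old", "untouched": "keep"}
	cli := map[string]string{"remote": "new"}

	fmt.Println(mergeVars(local, remote, cli))
	// map[foo:bar remote:new untouched:keep]
}
```

The point of the sentinel is purely defensive: it keeps Atlas-sourced values out of the local variable set so they cannot be re-encoded, type-coerced, or otherwise mangled on the round trip.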
--- command/push.go | 71 +++++++++++++++++++++++++--------- command/push_test.go | 91 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 18 deletions(-) diff --git a/command/push.go b/command/push.go index d7845ddb4..959070858 100644 --- a/command/push.go +++ b/command/push.go @@ -47,6 +47,22 @@ func (c *PushCommand) Run(args []string) int { overwriteMap[v] = struct{}{} } + // This is a map of variables specifically from the CLI that we want to overwrite. + // We need this because there is a chance that the user is trying to modify + // a variable we don't see in our context, but which exists in this atlas + // environment. + cliVars := make(map[string]string) + for k, v := range c.variables { + if _, ok := overwriteMap[k]; ok { + if val, ok := v.(string); ok { + cliVars[k] = val + } else { + c.Ui.Error(fmt.Sprintf("Error reading value for variable: %s", k)) + return 1 + } + } + } + // The pwd is used for the configuration path if one is not given pwd, err := os.Getwd() if err != nil { @@ -145,19 +161,14 @@ func (c *PushCommand) Run(args []string) int { return 1 } - // filter any overwrites from the atlas vars - for k := range overwriteMap { - delete(atlasVars, k) - } - // Set remote variables in the context if we don't have a value here. These // don't have to be correct, it just prevents the Input walk from prompting - // the user for input, The atlas variable may be an hcl-encoded object, but - // we're just going to set it as the raw string value. + // the user for input. ctxVars := ctx.Variables() - for k, av := range atlasVars { + atlasVarSentry := "ATLAS_78AC153CA649EAA44815DAD6CBD4816D" + for k, _ := range atlasVars { if _, ok := ctxVars[k]; !ok { - ctx.SetVariable(k, av.Value) + ctx.SetVariable(k, atlasVarSentry) } } @@ -203,23 +214,47 @@ func (c *PushCommand) Run(args []string) int { return 1 } - // Output to the user the variables that will be uploaded + // List of the vars we're uploading to display to the user. + // We always upload all vars from atlas, but only report them if they are overwritten. var setVars []string + // variables to upload var uploadVars []atlas.TFVar - // Now we can combine the vars for upload to atlas and list the variables - // we're uploading for the user + // first add all the variables we want to send which have been serialized + // from the local context. for _, sv := range serializedVars { - if av, ok := atlasVars[sv.Key]; ok { - // this belongs to Atlas - uploadVars = append(uploadVars, av) - } else { - // we're uploading our local version - setVars = append(setVars, sv.Key) + _, inOverwrite := overwriteMap[sv.Key] + _, inAtlas := atlasVars[sv.Key] + + // We have a variable that's not in atlas, so always send it. + if !inAtlas { uploadVars = append(uploadVars, sv) + setVars = append(setVars, sv.Key) } + // We're overwriting an atlas variable. + // We also want to check that we + // don't send the dummy sentry value back to atlas. This could happen + // if it's specified as an overwrite on the cli, but we didn't set a + // new value. + if inAtlas && inOverwrite && sv.Value != atlasVarSentry { + uploadVars = append(uploadVars, sv) + setVars = append(setVars, sv.Key) + + // remove this value from the atlas vars, because we're going to + // send back the remainder regardless. + delete(atlasVars, sv.Key) + } + } + + // now send back all the existing atlas vars, inserting any overwrites from the cli. 
+ for k, av := range atlasVars { + if v, ok := cliVars[k]; ok { + av.Value = v + setVars = append(setVars, k) + } + uploadVars = append(uploadVars, av) } sort.Strings(setVars) diff --git a/command/push_test.go b/command/push_test.go index 60270169e..9bb702381 100644 --- a/command/push_test.go +++ b/command/push_test.go @@ -264,6 +264,97 @@ func TestPush_localOverride(t *testing.T) { } } +// This tests that the push command will override Atlas variables +// even if we don't have it defined locally +func TestPush_remoteOverride(t *testing.T) { + // Disable test mode so input would be asked and setup the + // input reader/writers. + test = false + defer func() { test = true }() + defaultInputReader = bytes.NewBufferString("nope\n") + defaultInputWriter = new(bytes.Buffer) + + tmp, cwd := testCwd(t) + defer testFixCwd(t, tmp, cwd) + + // Create remote state file, this should be pulled + conf, srv := testRemoteState(t, testState(), 200) + defer srv.Close() + + // Persist local remote state + s := terraform.NewState() + s.Serial = 5 + s.Remote = conf + testStateFileRemote(t, s) + + // Path where the archive will be "uploaded" to + archivePath := testTempFile(t) + defer os.Remove(archivePath) + + client := &mockPushClient{File: archivePath} + // Provided vars should override existing ones + client.GetResult = map[string]atlas.TFVar{ + "remote": atlas.TFVar{ + Key: "remote", + Value: "old", + }, + } + ui := new(cli.MockUi) + c := &PushCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(testProvider()), + Ui: ui, + }, + + client: client, + } + + path := testFixturePath("push-tfvars") + args := []string{ + "-var-file", path + "/terraform.tfvars", + "-vcs=false", + "-overwrite=remote", + "-var", + "remote=new", + path, + } + + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + actual := testArchiveStr(t, archivePath) + expected := []string{ + ".terraform/", + ".terraform/terraform.tfstate", + "main.tf", + "terraform.tfvars", + } + if !reflect.DeepEqual(actual, expected) { + t.Fatalf("bad: %#v", actual) + } + + if client.UpsertOptions.Name != "foo" { + t.Fatalf("bad: %#v", client.UpsertOptions) + } + + found := false + // find the "remote" var and make sure we're going to set it + for _, tfVar := range client.UpsertOptions.TFVars { + if tfVar.Key == "remote" { + found = true + if tfVar.Value != "new" { + t.Log("'remote' variable should be set to 'new'") + t.Fatalf("sending instead: %#v", tfVar) + } + } + } + + if !found { + t.Fatal("'remote' variable not being sent to atlas") + } +} + // This tests that the push command prefers Atlas variables over // local ones. 
func TestPush_preferAtlas(t *testing.T) { From 85b551a9347d79d0764993455111d1f8e733453e Mon Sep 17 00:00:00 2001 From: Ryan Moran Date: Mon, 1 Aug 2016 17:44:20 -0700 Subject: [PATCH 0535/1238] Adds support for uploading blobs to azure storage from local source - adds "source", "parallelism", and "attempts" fields - supports both block and page type blobs - uploads run concurrently - page blobs skip empty byte ranges to efficiently upload large sparse files - "source" expects an absolute path to a file on the local file system - "parallelism" expects an integer value that indicates the number of workers per CPU core to run for concurrent uploads - "attempts" expects an integer value for number of attempts to make per page or block when uploading Signed-off-by: Raina Masand --- .../azurerm/resource_arm_storage_blob.go | 378 +++++++++++++++++- .../azurerm/resource_arm_storage_blob_test.go | 288 +++++++++++++ 2 files changed, 660 insertions(+), 6 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_storage_blob.go b/builtin/providers/azurerm/resource_arm_storage_blob.go index 80a3aed92..67a3900b0 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob.go @@ -1,10 +1,18 @@ package azurerm import ( + "bytes" + "crypto/rand" + "encoding/base64" "fmt" + "io" "log" + "os" + "runtime" "strings" + "sync" + "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/schema" ) @@ -49,14 +57,53 @@ func resourceArmStorageBlob() *schema.Resource { Default: 0, ValidateFunc: validateArmStorageBlobSize, }, + "source": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "url": { Type: schema.TypeString, Computed: true, }, + "parallelism": { + Type: schema.TypeInt, + Optional: true, + Default: 8, + ForceNew: true, + ValidateFunc: validateArmStorageBlobParallelism, + }, + "attempts": { + Type: schema.TypeInt, + Optional: true, + Default: 1, + ForceNew: true, + ValidateFunc: validateArmStorageBlobAttempts, + }, }, } } +func validateArmStorageBlobParallelism(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value <= 0 { + errors = append(errors, fmt.Errorf("Blob Parallelism %q is invalid, must be greater than 0", value)) + } + + return +} + +func validateArmStorageBlobAttempts(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + + if value <= 0 { + errors = append(errors, fmt.Errorf("Blob Attempts %q is invalid, must be greater than 0", value)) + } + + return +} + func validateArmStorageBlobSize(v interface{}, k string) (ws []string, errors []error) { value := v.(int) @@ -101,19 +148,338 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName) switch strings.ToLower(blobType) { case "block": - err = blobClient.CreateBlockBlob(cont, name) + if err := blobClient.CreateBlockBlob(cont, name); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + + source := d.Get("source").(string) + if source != "" { + parallelism := d.Get("parallelism").(int) + attempts := d.Get("attempts").(int) + if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + } case "page": - size := int64(d.Get("size").(int)) - err = blobClient.PutPageBlob(cont, name, size, 
map[string]string{}) - } - if err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) + source := d.Get("source").(string) + if source != "" { + parallelism := d.Get("parallelism").(int) + attempts := d.Get("attempts").(int) + if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + } else { + size := int64(d.Get("size").(int)) + if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + } } d.SetId(name) return resourceArmStorageBlobRead(d, meta) } +type resourceArmStorageBlobPage struct { + offset int64 + section *io.SectionReader +} + +func resourceArmStorageBlobPageUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error { + workerCount := parallelism * runtime.NumCPU() + + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening source file for upload %q: %s", source, err) + } + defer file.Close() + + blobSize, pageList, err := resourceArmStorageBlobPageSplit(file) + if err != nil { + return fmt.Errorf("Error splitting source file %q into pages: %s", source, err) + } + + if err := client.PutPageBlob(container, name, blobSize, map[string]string{}); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + + pages := make(chan resourceArmStorageBlobPage, len(pageList)) + errors := make(chan error, len(pageList)) + wg := &sync.WaitGroup{} + wg.Add(len(pageList)) + + total := int64(0) + for _, page := range pageList { + total += page.section.Size() + pages <- page + } + close(pages) + + for i := 0; i < workerCount; i++ { + go resourceArmStorageBlobPageUploadWorker(resourceArmStorageBlobPageUploadContext{ + container: container, + name: name, + source: source, + blobSize: blobSize, + client: client, + pages: pages, + errors: errors, + wg: wg, + attempts: attempts, + }) + } + + wg.Wait() + + if len(errors) > 0 { + return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors) + } + + return nil +} + +func resourceArmStorageBlobPageSplit(file *os.File) (int64, []resourceArmStorageBlobPage, error) { + const ( + minPageSize int64 = 4 * 1024 + maxPageSize int64 = 4 * 1024 * 1024 + ) + + info, err := file.Stat() + if err != nil { + return int64(0), nil, fmt.Errorf("Could not stat file %q: %s", file.Name(), err) + } + + blobSize := info.Size() + if info.Size()%minPageSize != 0 { + blobSize = info.Size() + (minPageSize - (info.Size() % minPageSize)) + } + + emptyPage := make([]byte, minPageSize) + + type byteRange struct { + offset int64 + length int64 + } + + var nonEmptyRanges []byteRange + var currentRange byteRange + for i := int64(0); i < blobSize; i += minPageSize { + pageBuf := make([]byte, minPageSize) + _, err = file.ReadAt(pageBuf, i) + if err != nil && err != io.EOF { + return int64(0), nil, fmt.Errorf("Could not read chunk at %d: %s", i, err) + } + + if bytes.Equal(pageBuf, emptyPage) { + if currentRange.length != 0 { + nonEmptyRanges = append(nonEmptyRanges, currentRange) + } + currentRange = byteRange{ + offset: i + minPageSize, + } + } else { + currentRange.length += minPageSize + if currentRange.length == maxPageSize || (currentRange.offset+currentRange.length == blobSize) { + nonEmptyRanges = append(nonEmptyRanges, currentRange) + currentRange = byteRange{ + offset: i + minPageSize, + } + 
} + } + } + + var pages []resourceArmStorageBlobPage + for _, nonEmptyRange := range nonEmptyRanges { + pages = append(pages, resourceArmStorageBlobPage{ + offset: nonEmptyRange.offset, + section: io.NewSectionReader(file, nonEmptyRange.offset, nonEmptyRange.length), + }) + } + + return info.Size(), pages, nil +} + +type resourceArmStorageBlobPageUploadContext struct { + container string + name string + source string + blobSize int64 + client *storage.BlobStorageClient + pages chan resourceArmStorageBlobPage + errors chan error + wg *sync.WaitGroup + attempts int +} + +func resourceArmStorageBlobPageUploadWorker(ctx resourceArmStorageBlobPageUploadContext) { + for page := range ctx.pages { + start := page.offset + end := page.offset + page.section.Size() - 1 + if end > ctx.blobSize-1 { + end = ctx.blobSize - 1 + } + size := end - start + 1 + + chunk := make([]byte, size) + _, err := page.section.Read(chunk) + if err != nil && err != io.EOF { + ctx.errors <- fmt.Errorf("Error reading source file %q at offset %d: %s", ctx.source, page.offset, err) + ctx.wg.Done() + continue + } + + for x := 0; x < ctx.attempts; x++ { + err = ctx.client.PutPage(ctx.container, ctx.name, start, end, storage.PageWriteTypeUpdate, chunk, map[string]string{}) + if err == nil { + break + } + } + if err != nil { + ctx.errors <- fmt.Errorf("Error writing page at offset %d for file %q: %s", page.offset, ctx.source, err) + ctx.wg.Done() + continue + } + + ctx.wg.Done() + } +} + +type resourceArmStorageBlobBlock struct { + section *io.SectionReader + id string +} + +func resourceArmStorageBlobBlockUploadFromSource(container, name, source string, client *storage.BlobStorageClient, parallelism, attempts int) error { + workerCount := parallelism * runtime.NumCPU() + + file, err := os.Open(source) + if err != nil { + return fmt.Errorf("Error opening source file for upload %q: %s", source, err) + } + defer file.Close() + + blockList, parts, err := resourceArmStorageBlobBlockSplit(file) + if err != nil { + return fmt.Errorf("Error reading and splitting source file for upload %q: %s", source, err) + } + + wg := &sync.WaitGroup{} + blocks := make(chan resourceArmStorageBlobBlock, len(parts)) + errors := make(chan error, len(parts)) + + wg.Add(len(parts)) + for _, p := range parts { + blocks <- p + } + close(blocks) + + for i := 0; i < workerCount; i++ { + go resourceArmStorageBlobBlockUploadWorker(resourceArmStorageBlobBlockUploadContext{ + client: client, + source: source, + container: container, + name: name, + blocks: blocks, + errors: errors, + wg: wg, + attempts: attempts, + }) + } + + wg.Wait() + + if len(errors) > 0 { + return fmt.Errorf("Error while uploading source file %q: %s", source, <-errors) + } + + err = client.PutBlockList(container, name, blockList) + if err != nil { + return fmt.Errorf("Error updating block list for source file %q: %s", source, err) + } + + return nil +} + +func resourceArmStorageBlobBlockSplit(file *os.File) ([]storage.Block, []resourceArmStorageBlobBlock, error) { + const ( + idSize = 64 + blockSize int64 = 4 * 1024 * 1024 + ) + var parts []resourceArmStorageBlobBlock + var blockList []storage.Block + + info, err := file.Stat() + if err != nil { + return nil, nil, fmt.Errorf("Error stating source file %q: %s", file.Name(), err) + } + + for i := int64(0); i < info.Size(); i = i + blockSize { + entropy := make([]byte, idSize) + _, err = rand.Read(entropy) + if err != nil { + return nil, nil, fmt.Errorf("Error generating a random block ID for source file %q: %s", file.Name(), err) + } + + 
sectionSize := blockSize + remainder := info.Size() - i + if remainder < blockSize { + sectionSize = remainder + } + + block := storage.Block{ + ID: base64.StdEncoding.EncodeToString(entropy), + Status: storage.BlockStatusUncommitted, + } + + blockList = append(blockList, block) + + parts = append(parts, resourceArmStorageBlobBlock{ + id: block.ID, + section: io.NewSectionReader(file, i, sectionSize), + }) + } + + return blockList, parts, nil +} + +type resourceArmStorageBlobBlockUploadContext struct { + client *storage.BlobStorageClient + container string + name string + source string + attempts int + blocks chan resourceArmStorageBlobBlock + errors chan error + wg *sync.WaitGroup +} + +func resourceArmStorageBlobBlockUploadWorker(ctx resourceArmStorageBlobBlockUploadContext) { + for block := range ctx.blocks { + buffer := make([]byte, block.section.Size()) + + _, err := block.section.Read(buffer) + if err != nil { + ctx.errors <- fmt.Errorf("Error reading source file %q: %s", ctx.source, err) + ctx.wg.Done() + continue + } + + for i := 0; i < ctx.attempts; i++ { + err = ctx.client.PutBlock(ctx.container, ctx.name, block.id, buffer) + if err == nil { + break + } + } + if err != nil { + ctx.errors <- fmt.Errorf("Error uploading block %q for source file %q: %s", block.id, ctx.source, err) + ctx.wg.Done() + continue + } + + ctx.wg.Done() + } +} + func resourceArmStorageBlobRead(d *schema.ResourceData, meta interface{}) error { armClient := meta.(*ArmClient) diff --git a/builtin/providers/azurerm/resource_arm_storage_blob_test.go b/builtin/providers/azurerm/resource_arm_storage_blob_test.go index d4fc5dc74..ecf768fe7 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob_test.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob_test.go @@ -1,11 +1,15 @@ package azurerm import ( + "crypto/rand" "fmt" + "io" + "io/ioutil" "testing" "strings" + "github.com/Azure/azure-sdk-for-go/storage" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" @@ -83,6 +87,62 @@ func TestResourceAzureRMStorageBlobSize_validation(t *testing.T) { } } +func TestResourceAzureRMStorageBlobParallelism_validation(t *testing.T) { + cases := []struct { + Value int + ErrCount int + }{ + { + Value: 1, + ErrCount: 0, + }, + { + Value: 0, + ErrCount: 1, + }, + { + Value: -1, + ErrCount: 1, + }, + } + + for _, tc := range cases { + _, errors := validateArmStorageBlobParallelism(tc.Value, "azurerm_storage_blob") + + if len(errors) != tc.ErrCount { + t.Fatalf("Expected the Azure RM Storage Blob parallelism to trigger a validation error") + } + } +} + +func TestResourceAzureRMStorageBlobAttempts_validation(t *testing.T) { + cases := []struct { + Value int + ErrCount int + }{ + { + Value: 1, + ErrCount: 0, + }, + { + Value: 0, + ErrCount: 1, + }, + { + Value: -1, + ErrCount: 1, + }, + } + + for _, tc := range cases { + _, errors := validateArmStorageBlobAttempts(tc.Value, "azurerm_storage_blob") + + if len(errors) != tc.ErrCount { + t.Fatalf("Expected the Azure RM Storage Blob attempts to trigger a validation error") + } + } +} + func TestAccAzureRMStorageBlob_basic(t *testing.T) { ri := acctest.RandInt() rs := strings.ToLower(acctest.RandString(11)) @@ -103,6 +163,100 @@ func TestAccAzureRMStorageBlob_basic(t *testing.T) { }) } +func TestAccAzureRMStorageBlobBlock_source(t *testing.T) { + ri := acctest.RandInt() + rs1 := strings.ToLower(acctest.RandString(11)) + sourceBlob, err := ioutil.TempFile("", "") + if err != nil { + 
t.Fatalf("Failed to create local source blob file") + } + + _, err = io.CopyN(sourceBlob, rand.Reader, 25*1024*1024) + if err != nil { + t.Fatalf("Failed to write random test to source blob") + } + + err = sourceBlob.Close() + if err != nil { + t.Fatalf("Failed to close source blob") + } + + config := fmt.Sprintf(testAccAzureRMStorageBlobBlock_source, ri, rs1, sourceBlob.Name()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageBlobDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.source", storage.BlobTypeBlock, sourceBlob.Name()), + ), + }, + }, + }) +} + +func TestAccAzureRMStorageBlobPage_source(t *testing.T) { + ri := acctest.RandInt() + rs1 := strings.ToLower(acctest.RandString(11)) + sourceBlob, err := ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Failed to create local source blob file") + } + + err = sourceBlob.Truncate(25*1024*1024 + 512) + if err != nil { + t.Fatalf("Failed to truncate file to 25M") + } + + for i := int64(0); i < 20; i = i + 2 { + randomBytes := make([]byte, 1*1024*1024) + _, err = rand.Read(randomBytes) + if err != nil { + t.Fatalf("Failed to read random bytes") + } + + _, err = sourceBlob.WriteAt(randomBytes, i*1024*1024) + if err != nil { + t.Fatalf("Failed to write random bytes to file") + } + } + + randomBytes := make([]byte, 5*1024*1024) + _, err = rand.Read(randomBytes) + if err != nil { + t.Fatalf("Failed to read random bytes") + } + + _, err = sourceBlob.WriteAt(randomBytes, 20*1024*1024) + if err != nil { + t.Fatalf("Failed to write random bytes to file") + } + + err = sourceBlob.Close() + if err != nil { + t.Fatalf("Failed to close source blob") + } + + config := fmt.Sprintf(testAccAzureRMStorageBlobPage_source, ri, rs1, sourceBlob.Name()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageBlobDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.source", storage.BlobTypePage, sourceBlob.Name()), + ), + }, + }, + }) +} + func testCheckAzureRMStorageBlobExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -141,6 +295,64 @@ func testCheckAzureRMStorageBlobExists(name string) resource.TestCheckFunc { } } +func testCheckAzureRMStorageBlobMatchesFile(name string, kind storage.BlobType, filePath string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + name := rs.Primary.Attributes["name"] + storageAccountName := rs.Primary.Attributes["storage_account_name"] + storageContainerName := rs.Primary.Attributes["storage_container_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for storage blob: %s", name) + } + + armClient := testAccProvider.Meta().(*ArmClient) + blobClient, accountExists, err := armClient.getBlobStorageClientForStorageAccount(resourceGroup, storageAccountName) + if err != nil { + return err + } + if !accountExists { + return fmt.Errorf("Bad: Storage Account %q does not exist", storageAccountName) + } + + 
properties, err := blobClient.GetBlobProperties(storageContainerName, name) + if err != nil { + return err + } + + if properties.BlobType != kind { + return fmt.Errorf("Bad: blob type %q does not match expected type %q", properties.BlobType, kind) + } + + blob, err := blobClient.GetBlob(storageContainerName, name) + if err != nil { + return err + } + + contents, err := ioutil.ReadAll(blob) + if err != nil { + return err + } + defer blob.Close() + + expectedContents, err := ioutil.ReadFile(filePath) + if err != nil { + return err + } + + if string(contents) != string(expectedContents) { + return fmt.Errorf("Bad: Storage Blob %q (storage container: %q) does not match contents", name, storageContainerName) + } + + return nil + } +} + func testCheckAzureRMStorageBlobDestroy(s *terraform.State) error { for _, rs := range s.RootModule().Resources { if rs.Type != "azurerm_storage_blob" { @@ -212,3 +424,79 @@ resource "azurerm_storage_blob" "test" { size = 5120 } ` + +var testAccAzureRMStorageBlobBlock_source = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "westus" +} + +resource "azurerm_storage_account" "source" { + name = "acctestacc%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "source" { + name = "source" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + container_access_type = "blob" +} + +resource "azurerm_storage_blob" "source" { + name = "source.vhd" + + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" + + type = "block" + source = "%s" + parallelism = 4 + attempts = 2 +} +` + +var testAccAzureRMStorageBlobPage_source = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "westus" +} + +resource "azurerm_storage_account" "source" { + name = "acctestacc%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "source" { + name = "source" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + container_access_type = "blob" +} + +resource "azurerm_storage_blob" "source" { + name = "source.vhd" + + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" + + type = "page" + source = "%s" + parallelism = 3 + attempts = 3 +} +` From 19800b8e2652331aaa6ac06183fd59c44298ba51 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Fri, 5 Aug 2016 09:14:05 +0900 Subject: [PATCH 0536/1238] Add state filter to aws_availability_zones data source. (#7965) * Add state filter to aws_availability_zones data source. This commit adds an ability to filter Availability Zones based on state, where by default it would only list available zones. Be advised that this does not always works reliably for an older accounts which have been created in the pre-VPC era of EC2. 
These accounts tends to retrieve availability zones that are not VPC-enabled, thus creation of a custom subnet within such Availability Zone would result in a failure. Signed-off-by: Krzysztof Wilczynski * Update documentation for aws_availability_zones data source. Signed-off-by: Krzysztof Wilczynski * Do not filter on state by default. This commit makes the state filter applicable only when set. Signed-off-by: Krzysztof Wilczynski --- .../aws/data_source_availability_zones.go | 51 ++++++++++++++++--- .../data_source_availability_zones_test.go | 42 +++++++++++++-- .../aws/d/availability_zones.html.markdown | 11 ++-- 3 files changed, 89 insertions(+), 15 deletions(-) diff --git a/builtin/providers/aws/data_source_availability_zones.go b/builtin/providers/aws/data_source_availability_zones.go index fd3a6f18a..a9a9d501f 100644 --- a/builtin/providers/aws/data_source_availability_zones.go +++ b/builtin/providers/aws/data_source_availability_zones.go @@ -21,6 +21,11 @@ func dataSourceAwsAvailabilityZones() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "state": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateStateType, + }, }, } } @@ -28,25 +33,55 @@ func dataSourceAwsAvailabilityZones() *schema.Resource { func dataSourceAwsAvailabilityZonesRead(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).ec2conn - log.Printf("[DEBUG] Reading availability zones") + log.Printf("[DEBUG] Reading Availability Zones.") d.SetId(time.Now().UTC().String()) - req := &ec2.DescribeAvailabilityZonesInput{DryRun: aws.Bool(false)} - azresp, err := conn.DescribeAvailabilityZones(req) - if err != nil { - return fmt.Errorf("Error listing availability zones: %s", err) + request := &ec2.DescribeAvailabilityZonesInput{} + + if v, ok := d.GetOk("state"); ok { + request.Filters = []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("state"), + Values: []*string{aws.String(v.(string))}, + }, + } } - raw := make([]string, len(azresp.AvailabilityZones)) - for i, v := range azresp.AvailabilityZones { + log.Printf("[DEBUG] Availability Zones request options: %#v", *request) + + resp, err := conn.DescribeAvailabilityZones(request) + if err != nil { + return fmt.Errorf("Error fetching Availability Zones: %s", err) + } + + raw := make([]string, len(resp.AvailabilityZones)) + for i, v := range resp.AvailabilityZones { raw[i] = *v.ZoneName } sort.Strings(raw) if err := d.Set("names", raw); err != nil { - return fmt.Errorf("[WARN] Error setting availability zones") + return fmt.Errorf("[WARN] Error setting Availability Zones: %s", err) } return nil } + +func validateStateType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + validState := map[string]bool{ + "available": true, + "information": true, + "impaired": true, + "unavailable": true, + } + + if !validState[value] { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Availability Zone state %q. 
Valid states are: %q, %q, %q and %q.", + k, value, "available", "information", "impaired", "unavailable")) + } + return +} diff --git a/builtin/providers/aws/data_source_availability_zones_test.go b/builtin/providers/aws/data_source_availability_zones_test.go index 86060fb8f..7dbae2398 100644 --- a/builtin/providers/aws/data_source_availability_zones_test.go +++ b/builtin/providers/aws/data_source_availability_zones_test.go @@ -22,6 +22,12 @@ func TestAccAWSAvailabilityZones_basic(t *testing.T) { testAccCheckAwsAvailabilityZonesMeta("data.aws_availability_zones.availability_zones"), ), }, + resource.TestStep{ + Config: testAccCheckAwsAvailabilityZonesStateConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckAwsAvailabilityZoneState("data.aws_availability_zones.state_filter"), + ), + }, }, }) } @@ -34,7 +40,7 @@ func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc { } if rs.Primary.ID == "" { - return fmt.Errorf("AZ resource ID not set") + return fmt.Errorf("AZ resource ID not set.") } actual, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes) @@ -51,10 +57,33 @@ func testAccCheckAwsAvailabilityZonesMeta(n string) resource.TestCheckFunc { } } +func testAccCheckAwsAvailabilityZoneState(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Can't find AZ resource: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("AZ resource ID not set.") + } + + if _, ok := rs.Primary.Attributes["state"]; !ok { + return fmt.Errorf("AZs state filter is missing, should be set.") + } + + _, err := testAccCheckAwsAvailabilityZonesBuildAvailable(rs.Primary.Attributes) + if err != nil { + return err + } + return nil + } +} + func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([]string, error) { v, ok := attrs["names.#"] if !ok { - return nil, fmt.Errorf("Available AZ list is missing") + return nil, fmt.Errorf("Available AZ list is missing.") } qty, err := strconv.Atoi(v) if err != nil { @@ -67,7 +96,7 @@ func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([] for n := range zones { zone, ok := attrs["names."+strconv.Itoa(n)] if !ok { - return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug") + return nil, fmt.Errorf("AZ list corrupt, this is definitely a bug.") } zones[n] = zone } @@ -75,6 +104,11 @@ func testAccCheckAwsAvailabilityZonesBuildAvailable(attrs map[string]string) ([] } const testAccCheckAwsAvailabilityZonesConfig = ` -data "aws_availability_zones" "availability_zones" { +data "aws_availability_zones" "availability_zones" { } +` + +const testAccCheckAwsAvailabilityZonesStateConfig = ` +data "aws_availability_zones" "state_filter" { + state = "available" } ` diff --git a/website/source/docs/providers/aws/d/availability_zones.html.markdown b/website/source/docs/providers/aws/d/availability_zones.html.markdown index e482a142b..0eb87d781 100644 --- a/website/source/docs/providers/aws/d/availability_zones.html.markdown +++ b/website/source/docs/providers/aws/d/availability_zones.html.markdown @@ -3,7 +3,7 @@ layout: "aws" page_title: "AWS: aws_availability_zones" sidebar_current: "docs-aws-datasource-availability-zones" description: |- - Provides a list of availability zones which can be used by an AWS account + Provides a list of Availability Zones which can be used by an AWS account. 
--- # aws\_availability\_zones @@ -35,10 +35,15 @@ resource "aws_subnet" "secondary" { ## Argument Reference -There are no arguments for this data source. +The following arguments are supported: + +* `state` - (Optional) Allows to filter list of Availability Zones based on their +current state. Can be either `"available"`, `"information"`, `"impaired"` or +`"unavailable"`. By default the list includes a complete set of Availability Zones +to which the underlying AWS account has access, regardless of their state. ## Attributes Reference The following attributes are exported: -* `names` - A list of the availability zone names available to the account. +* `names` - A list of the Availability Zone names available to the account. From e8dbd0e1aea68155a728b995ab2cd3241e883f0d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 10:15:29 +1000 Subject: [PATCH 0537/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4ca08820e..2ccc2b25c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ IMPROVEMENTS: BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] + * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From 25a860e990b8d13ada7ca84fcb2e0140c060c56b Mon Sep 17 00:00:00 2001 From: stack72 Date: Fri, 5 Aug 2016 10:36:10 +1000 Subject: [PATCH 0538/1238] docs/aws: Adding to the docs side bar --- website/source/layouts/aws.erb | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 00c9f140f..8ba2ae80e 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -209,6 +209,10 @@ aws_ami_from_instance + > + ami_launch_permission + + > aws_app_cookie_stickiness_policy From e7f31ebfd8b73cbbcde57b973a71f41b45be8c08 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 10:38:00 +1000 Subject: [PATCH 0539/1238] docs/aws: Add and as docs to the AWS resource (#7988) --- website/source/docs/providers/aws/r/ami_copy.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/providers/aws/r/ami_copy.html.markdown b/website/source/docs/providers/aws/r/ami_copy.html.markdown index 14745f20c..a65aeab38 100644 --- a/website/source/docs/providers/aws/r/ami_copy.html.markdown +++ b/website/source/docs/providers/aws/r/ami_copy.html.markdown @@ -43,6 +43,8 @@ The following arguments are supported: given by `source_ami_region`. * `source_region` - (Required) The region from which the AMI will be copied. This may be the same as the AWS provider region in order to create a copy within the same region. +* `encrypted` - (Optional) Specifies whether the destination snapshots of the copied image should be encrypted. Defaults to `false` +* `kms_key_id` - (Optional) The full ARN of the KMS Key to use when encrypting the snapshots of an image during a copy operation. If not specified, then the default AWS KMS Key will be used This resource also exposes the full set of arguments from the [`aws_ami`](ami.html) resource. 
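The `encrypted` and `kms_key_id` arguments documented in the patch above are easiest to read in context. A minimal configuration sketch — the source AMI ID, region, and the KMS key resource are illustrative placeholders, not taken from this patch:

```
# Hypothetical key, included only so the example is self-contained.
resource "aws_kms_key" "example" {
  description = "Key for encrypting copied AMI snapshots"
}

resource "aws_ami_copy" "example" {
  name              = "terraform-example"
  source_ami_id     = "ami-xxxxxxxx"
  source_ami_region = "us-west-1"

  # Encrypt the destination snapshots; if kms_key_id were omitted,
  # the default AWS KMS key would be used instead.
  encrypted  = true
  kms_key_id = "${aws_kms_key.example.arn}"
}
```
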
From 0e565e597303d65e43d4c4799cf9c057ec59efc9 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 4 Aug 2016 14:12:52 -0700 Subject: [PATCH 0540/1238] providers/google: Allow custom Compute Engine service account This commit allows an operator to specify the e-mail address of a service account to use with a Google Compute Engine instance. If no service account e-mail is provided, the default service account is used. Closes #7985 --- .../providers/google/resource_compute_instance.go | 11 +++++++++-- .../google/r/compute_instance.html.markdown | 14 +++++++++----- 2 files changed, 18 insertions(+), 7 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index 11aa864dd..cb06822fa 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -250,14 +250,16 @@ func resourceComputeInstance() *schema.Resource { "service_account": &schema.Schema{ Type: schema.TypeList, + MaxItems: 1, Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "email": &schema.Schema{ Type: schema.TypeString, - Computed: true, ForceNew: true, + Optional: true, + Computed: true, }, "scopes": &schema.Schema{ @@ -524,8 +526,13 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err scopes[i] = canonicalizeServiceScope(v.(string)) } + email := "default" + if v := d.Get(prefix + ".email"); v != nil { + email = v.(string) + } + serviceAccount := &compute.ServiceAccount{ - Email: "default", + Email: email, Scopes: scopes, } diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index baa14c98c..77f89f360 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -101,6 +101,7 @@ The following arguments are supported: this configuration option are detailed below. * `service_account` - (Optional) Service account to attach to the instance. + Structure is documented below. * `tags` - (Optional) Tags to attach to the instance. @@ -151,6 +152,14 @@ The `access_config` block supports: * `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's network ip. If not given, one will be generated. +The `service_account` block supports: + +* `email` - (Optional) The service account e-mail address. If not given, the + default Google Compute Engine service account is used. + +* `scopes` - (Required) A list of service scopes. Both OAuth2 URLs and gcloud + short names are supported. + (DEPRECATED) The `network` block supports: * `source` - (Required) The name of the network to attach this interface to. @@ -158,11 +167,6 @@ The `access_config` block supports: * `address` - (Optional) The IP address of a reserved IP address to assign to this interface. -The `service_account` block supports: - -* `scopes` - (Required) A list of service scopes. Both OAuth2 URLs and gcloud - short names are supported. - The `scheduling` block supports: * `preemptible` - (Optional) Is the instance preemptible. From 99d8c2a3b38d320f8c245d95437033158df85675 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Fri, 5 Aug 2016 12:04:57 +0900 Subject: [PATCH 0541/1238] Fix. Handle lack of snapshot ID for a volume. 
(#7995) This commit resolves the issue where lack of snapshot ID in the device mapping section of the API response to DescribeImagesResponse would cause Terraform to crash due to a nil pointer dereference. Usually, the snapshot ID is included, but in some unique cases (e.g. ECS-enabled AMI from Amazon available on the Market Place) a volume that is attached might not have it. The API documentation does not clearly define whether the snapshot ID either should be or must be included for any volume in the response. Signed-off-by: Krzysztof Wilczynski --- builtin/providers/aws/resource_aws_ami.go | 5 ++++- builtin/providers/aws/resource_aws_ami_copy_test.go | 3 +++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_ami.go b/builtin/providers/aws/resource_aws_ami.go index 621881036..8b727e105 100644 --- a/builtin/providers/aws/resource_aws_ami.go +++ b/builtin/providers/aws/resource_aws_ami.go @@ -174,13 +174,16 @@ func resourceAwsAmiRead(d *schema.ResourceData, meta interface{}) error { "delete_on_termination": *blockDev.Ebs.DeleteOnTermination, "encrypted": *blockDev.Ebs.Encrypted, "iops": 0, - "snapshot_id": *blockDev.Ebs.SnapshotId, "volume_size": int(*blockDev.Ebs.VolumeSize), "volume_type": *blockDev.Ebs.VolumeType, } if blockDev.Ebs.Iops != nil { ebsBlockDev["iops"] = int(*blockDev.Ebs.Iops) } + // The snapshot ID might not be set. + if blockDev.Ebs.SnapshotId != nil { + ebsBlockDev["snapshot_id"] = *blockDev.Ebs.SnapshotId + } ebsBlockDevs = append(ebsBlockDevs, ebsBlockDev) } else { ephemeralBlockDevs = append(ephemeralBlockDevs, map[string]interface{}{ diff --git a/builtin/providers/aws/resource_aws_ami_copy_test.go b/builtin/providers/aws/resource_aws_ami_copy_test.go index 029e9a5ab..0a64526d6 100644 --- a/builtin/providers/aws/resource_aws_ami_copy_test.go +++ b/builtin/providers/aws/resource_aws_ami_copy_test.go @@ -61,6 +61,9 @@ func TestAccAWSAMICopy(t *testing.T) { } for _, bdm := range image.BlockDeviceMappings { + // The snapshot ID might not be set, + // even for a block device that is an + // EBS volume. 
if bdm.Ebs != nil && bdm.Ebs.SnapshotId != nil { snapshots = append(snapshots, *bdm.Ebs.SnapshotId) } From 3982495723449470fad8fd8cd0477dd405de8965 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 13:06:24 +1000 Subject: [PATCH 0542/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ccc2b25c..3743ba4c8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS: BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] + * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` [GH-7995] * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From ebf6e51b32ed016d6505655f301609684a50b135 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 5 Aug 2016 07:12:27 +0100 Subject: [PATCH 0543/1238] provider/aws: Retry association of IAM Role & instance profile (#7938) --- builtin/providers/aws/resource_aws_instance.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_instance.go b/builtin/providers/aws/resource_aws_instance.go index ebd611109..4204debb3 100644 --- a/builtin/providers/aws/resource_aws_instance.go +++ b/builtin/providers/aws/resource_aws_instance.go @@ -366,15 +366,20 @@ func resourceAwsInstanceCreate(d *schema.ResourceData, meta interface{}) error { log.Printf("[DEBUG] Run configuration: %s", runOpts) var runResp *ec2.Reservation - err = resource.Retry(10*time.Second, func() *resource.RetryError { + err = resource.Retry(15*time.Second, func() *resource.RetryError { var err error runResp, err = conn.RunInstances(runOpts) - // IAM profiles can take ~10 seconds to propagate in AWS: + // IAM instance profiles can take ~10 seconds to propagate in AWS: // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#launch-instance-with-role-console if isAWSErr(err, "InvalidParameterValue", "Invalid IAM Instance Profile") { log.Printf("[DEBUG] Invalid IAM Instance Profile referenced, retrying...") return resource.RetryableError(err) } + // IAM roles can also take time to propagate in AWS: + if isAWSErr(err, "InvalidParameterValue", " has no associated IAM Roles") { + log.Printf("[DEBUG] IAM Instance Profile appears to have no IAM roles, retrying...") + return resource.RetryableError(err) + } return resource.NonRetryableError(err) }) // Warn if the AWS Error involves group ids, to help identify situation From 6c057c8c11829398f6cbb9567326d4880591f392 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 16:13:22 +1000 Subject: [PATCH 0544/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3743ba4c8..6465ec47b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` [GH-7995] + * provider/aws: Retry association of IAM Role & instance profile [GH-7938] * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From 7630a585a23063f3f64983a8e07472d7f9961ca0 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Fri, 5 Aug 2016 
08:27:03 +0200 Subject: [PATCH 0545/1238] Improve influxdb provider (#7333) * Improve influxdb provider - reduce public funcs. We should not make things public that don't need to be public - improve tests by verifying remote state - add influxdb_user resource allows you to manage influxdb users: ``` resource "influxdb_user" "admin" { name = "administrator" password = "super-secret" admin = true } ``` and also database specific grants: ``` resource "influxdb_user" "ro" { name = "read-only" password = "read-only" grant { database = "a" privilege = "read" } } ``` * Grant/ revoke admin access properly * Add continuous_query resource see https://docs.influxdata.com/influxdb/v0.13/query_language/continuous_queries/ for the details about continuous queries: ``` resource "influxdb_database" "test" { name = "terraform-test" } resource "influxdb_continuous_query" "minnie" { name = "minnie" database = "${influxdb_database.test.name}" query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)" } ``` --- .../providers/influxdb/continuous_query.go | 120 ++++++ .../influxdb/continuous_query_test.go | 87 +++++ builtin/providers/influxdb/provider.go | 10 +- .../providers/influxdb/resource_database.go | 14 +- .../influxdb/resource_database_test.go | 42 ++- builtin/providers/influxdb/resource_user.go | 271 ++++++++++++++ .../providers/influxdb/resource_user_test.go | 349 ++++++++++++++++++ .../providers/influxdb/index.html.markdown | 11 + .../influxdb/r/continuous_query.html.md | 38 ++ .../docs/providers/influxdb/r/user.html.md | 47 +++ website/source/layouts/influxdb.erb | 8 +- 11 files changed, 984 insertions(+), 13 deletions(-) create mode 100644 builtin/providers/influxdb/continuous_query.go create mode 100644 builtin/providers/influxdb/continuous_query_test.go create mode 100644 builtin/providers/influxdb/resource_user.go create mode 100644 builtin/providers/influxdb/resource_user_test.go create mode 100644 website/source/docs/providers/influxdb/r/continuous_query.html.md create mode 100644 website/source/docs/providers/influxdb/r/user.html.md diff --git a/builtin/providers/influxdb/continuous_query.go b/builtin/providers/influxdb/continuous_query.go new file mode 100644 index 000000000..2bc921aa6 --- /dev/null +++ b/builtin/providers/influxdb/continuous_query.go @@ -0,0 +1,120 @@ +package influxdb + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/influxdata/influxdb/client" +) + +func resourceContinuousQuery() *schema.Resource { + return &schema.Resource{ + Create: createContinuousQuery, + Read: readContinuousQuery, + Delete: deleteContinuousQuery, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "database": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "query": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func createContinuousQuery(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + + name := d.Get("name").(string) + database := d.Get("database").(string) + + queryStr := fmt.Sprintf("CREATE CONTINUOUS QUERY %s ON %s BEGIN %s END", name, quoteIdentifier(database), d.Get("query").(string)) + query := client.Query{ + Command: queryStr, + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + d.Set("name", name) + d.Set("database", database) + d.Set("query", d.Get("query").(string)) + 
d.SetId(fmt.Sprintf("influxdb-cq:%s", name)) + + return readContinuousQuery(d, meta) +} + +func readContinuousQuery(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + database := d.Get("database").(string) + + // InfluxDB doesn't have a command to check the existence of a single + // ContinuousQuery, so we instead must read the list of all ContinuousQuerys and see + // if ours is present in it. + query := client.Query{ + Command: "SHOW CONTINUOUS QUERIES", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + for _, series := range resp.Results[0].Series { + if series.Name == database { + for _, result := range series.Values { + if result[0].(string) == name { + return nil + } + } + } + } + + // If we fell out here then we didn't find our ContinuousQuery in the list. + d.SetId("") + + return nil +} + +func deleteContinuousQuery(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + database := d.Get("database").(string) + + queryStr := fmt.Sprintf("DROP CONTINUOUS QUERY %s ON %s", name, quoteIdentifier(database)) + query := client.Query{ + Command: queryStr, + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + d.SetId("") + + return nil +} diff --git a/builtin/providers/influxdb/continuous_query_test.go b/builtin/providers/influxdb/continuous_query_test.go new file mode 100644 index 000000000..78fdaa21b --- /dev/null +++ b/builtin/providers/influxdb/continuous_query_test.go @@ -0,0 +1,87 @@ +package influxdb + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/influxdata/influxdb/client" +) + +func TestAccInfluxDBContiuousQuery(t *testing.T) { + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccContiuousQueryConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckContiuousQueryExists("influxdb_continuous_query.minnie"), + resource.TestCheckResourceAttr( + "influxdb_continuous_query.minnie", "name", "minnie", + ), + resource.TestCheckResourceAttr( + "influxdb_continuous_query.minnie", "database", "terraform-test", + ), + resource.TestCheckResourceAttr( + "influxdb_continuous_query.minnie", "query", "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)", + ), + ), + }, + }, + }) +} + +func testAccCheckContiuousQueryExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ContiuousQuery id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := client.Query{ + Command: "SHOW CONTINUOUS QUERIES", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, series := range resp.Results[0].Series { + if series.Name == rs.Primary.Attributes["database"] { + for _, result := range series.Values { + if result[0].(string) == rs.Primary.Attributes["name"] { + return nil + } + } + } + } + + return fmt.Errorf("ContiuousQuery %q does not exist", rs.Primary.Attributes["name"]) + } +} + +var testAccContiuousQueryConfig = ` + +resource "influxdb_database" "test" { + name = "terraform-test" +} + 
+resource "influxdb_continuous_query" "minnie" { + name = "minnie" + database = "${influxdb_database.test.name}" + query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)" +} + +` diff --git a/builtin/providers/influxdb/provider.go b/builtin/providers/influxdb/provider.go index 5d33b1a61..0917d22b6 100644 --- a/builtin/providers/influxdb/provider.go +++ b/builtin/providers/influxdb/provider.go @@ -16,7 +16,9 @@ var quoteReplacer = strings.NewReplacer(`"`, `\"`) func Provider() terraform.ResourceProvider { return &schema.Provider{ ResourcesMap: map[string]*schema.Resource{ - "influxdb_database": ResourceDatabase(), + "influxdb_database": resourceDatabase(), + "influxdb_user": resourceUser(), + "influxdb_continuous_query": resourceContinuousQuery(), }, Schema: map[string]*schema.Schema{ @@ -39,11 +41,11 @@ func Provider() terraform.ResourceProvider { }, }, - ConfigureFunc: Configure, + ConfigureFunc: configure, } } -func Configure(d *schema.ResourceData) (interface{}, error) { +func configure(d *schema.ResourceData) (interface{}, error) { url, err := url.Parse(d.Get("url").(string)) if err != nil { return nil, fmt.Errorf("invalid InfluxDB URL: %s", err) @@ -69,5 +71,5 @@ func Configure(d *schema.ResourceData) (interface{}, error) { } func quoteIdentifier(ident string) string { - return fmt.Sprintf(`"%s"`, quoteReplacer.Replace(ident)) + return fmt.Sprintf(`%q`, quoteReplacer.Replace(ident)) } diff --git a/builtin/providers/influxdb/resource_database.go b/builtin/providers/influxdb/resource_database.go index b183957f1..257e95054 100644 --- a/builtin/providers/influxdb/resource_database.go +++ b/builtin/providers/influxdb/resource_database.go @@ -7,11 +7,11 @@ import ( "github.com/influxdata/influxdb/client" ) -func ResourceDatabase() *schema.Resource { +func resourceDatabase() *schema.Resource { return &schema.Resource{ - Create: CreateDatabase, - Read: ReadDatabase, - Delete: DeleteDatabase, + Create: createDatabase, + Read: readDatabase, + Delete: deleteDatabase, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -23,7 +23,7 @@ func ResourceDatabase() *schema.Resource { } } -func CreateDatabase(d *schema.ResourceData, meta interface{}) error { +func createDatabase(d *schema.ResourceData, meta interface{}) error { conn := meta.(*client.Client) name := d.Get("name").(string) @@ -45,7 +45,7 @@ func CreateDatabase(d *schema.ResourceData, meta interface{}) error { return nil } -func ReadDatabase(d *schema.ResourceData, meta interface{}) error { +func readDatabase(d *schema.ResourceData, meta interface{}) error { conn := meta.(*client.Client) name := d.Id() @@ -76,7 +76,7 @@ func ReadDatabase(d *schema.ResourceData, meta interface{}) error { return nil } -func DeleteDatabase(d *schema.ResourceData, meta interface{}) error { +func deleteDatabase(d *schema.ResourceData, meta interface{}) error { conn := meta.(*client.Client) name := d.Id() diff --git a/builtin/providers/influxdb/resource_database_test.go b/builtin/providers/influxdb/resource_database_test.go index 3c2bf422d..07f4cf6a2 100644 --- a/builtin/providers/influxdb/resource_database_test.go +++ b/builtin/providers/influxdb/resource_database_test.go @@ -1,18 +1,22 @@ package influxdb import ( + "fmt" "testing" "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/influxdata/influxdb/client" ) -func TestAccDatabase(t *testing.T) { +func TestAccInfluxDBDatabase(t *testing.T) { resource.Test(t, resource.TestCase{ Providers: testAccProviders, Steps: 
[]resource.TestStep{ resource.TestStep{ Config: testAccDatabaseConfig, Check: resource.ComposeTestCheckFunc( + testAccCheckDatabaseExists("influxdb_database.test"), resource.TestCheckResourceAttr( "influxdb_database.test", "name", "terraform-test", ), @@ -22,6 +26,42 @@ func TestAccDatabase(t *testing.T) { }) } +func testAccCheckDatabaseExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No database id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := client.Query{ + Command: "SHOW DATABASES", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, result := range resp.Results[0].Series[0].Values { + if result[0] == rs.Primary.Attributes["name"] { + return nil + } + } + + return fmt.Errorf("Database %q does not exist", rs.Primary.Attributes["name"]) + } +} + var testAccDatabaseConfig = ` resource "influxdb_database" "test" { diff --git a/builtin/providers/influxdb/resource_user.go b/builtin/providers/influxdb/resource_user.go new file mode 100644 index 000000000..21475a314 --- /dev/null +++ b/builtin/providers/influxdb/resource_user.go @@ -0,0 +1,271 @@ +package influxdb + +import ( + "fmt" + "strings" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/influxdata/influxdb/client" +) + +func resourceUser() *schema.Resource { + return &schema.Resource{ + Create: createUser, + Read: readUser, + Update: updateUser, + Delete: deleteUser, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "admin": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + "grant": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "database": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "privilege": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func createUser(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + + name := d.Get("name").(string) + password := d.Get("password").(string) + + is_admin := d.Get("admin").(bool) + admin_privileges := "" + if is_admin { + admin_privileges = "WITH ALL PRIVILEGES" + } + + queryStr := fmt.Sprintf("CREATE USER %s WITH PASSWORD '%s' %s", name, password, admin_privileges) + query := client.Query{ + Command: queryStr, + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + d.SetId(fmt.Sprintf("influxdb-user:%s", name)) + + if v, ok := d.GetOk("grant"); ok { + grants := v.([]interface{}) + for _, vv := range grants { + grant := vv.(map[string]interface{}) + if err := grantPrivilegeOn(conn, grant["privilege"].(string), grant["database"].(string), name); err != nil { + return err + } + } + } + + return readUser(d, meta) +} + +func exec(conn *client.Client, query string) error { + resp, err := conn.Query(client.Query{ + Command: query, + }) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + return nil +} + +func grantPrivilegeOn(conn *client.Client, privilege, database, user string) error { + return exec(conn, 
fmt.Sprintf("GRANT %s ON %s TO %s", privilege, quoteIdentifier(database), user)) +} + +func revokePrivilegeOn(conn *client.Client, privilege, database, user string) error { + return exec(conn, fmt.Sprintf("REVOKE %s ON %s FROM %s", privilege, quoteIdentifier(database), user)) +} + +func grantAllOn(conn *client.Client, user string) error { + return exec(conn, fmt.Sprintf("GRANT ALL PRIVILEGES TO %s", user)) +} + +func revokeAllOn(conn *client.Client, user string) error { + return exec(conn, fmt.Sprintf("REVOKE ALL PRIVILEGES FROM %s", user)) +} + +func readUser(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + + // InfluxDB doesn't have a command to check the existence of a single + // User, so we instead must read the list of all Users and see + // if ours is present in it. + query := client.Query{ + Command: "SHOW USERS", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + var found = false + for _, result := range resp.Results[0].Series[0].Values { + if result[0] == name { + found = true + d.Set("admin", result[1].(bool)) + break + } + } + + if !found { + // If we fell out here then we didn't find our User in the list. + d.SetId("") + + return nil + } + + return readGrants(d, meta) +} + +func readGrants(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + + query := client.Query{ + Command: fmt.Sprintf("SHOW GRANTS FOR %s", name), + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + var grants = []map[string]string{} + for _, result := range resp.Results[0].Series[0].Values { + if result[1].(string) != "NO PRIVILEGES" { + var grant = map[string]string{ + "database": result[0].(string), + "privilege": strings.ToLower(result[1].(string)), + } + grants = append(grants, grant) + } + } + d.Set("grant", grants) + return nil +} + +func updateUser(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + + if d.HasChange("admin") { + if !d.Get("admin").(bool) { + revokeAllOn(conn, name) + } else { + grantAllOn(conn, name) + } + } + + if d.HasChange("grant") { + oldGrantV, newGrantV := d.GetChange("grant") + oldGrant := oldGrantV.([]interface{}) + newGrant := newGrantV.([]interface{}) + + for _, oGV := range oldGrant { + oldGrant := oGV.(map[string]interface{}) + + exists := false + privilege := oldGrant["privilege"].(string) + for _, nGV := range newGrant { + newGrant := nGV.(map[string]interface{}) + + if newGrant["database"].(string) == oldGrant["database"].(string) { + exists = true + privilege = newGrant["privilege"].(string) + } + } + + if !exists { + revokePrivilegeOn(conn, oldGrant["privilege"].(string), oldGrant["database"].(string), name) + } else { + if privilege != oldGrant["privilege"].(string) { + grantPrivilegeOn(conn, privilege, oldGrant["database"].(string), name) + } + } + } + + for _, nGV := range newGrant { + newGrant := nGV.(map[string]interface{}) + + exists := false + for _, oGV := range oldGrant { + oldGrant := oGV.(map[string]interface{}) + + exists = exists || (newGrant["database"].(string) == oldGrant["database"].(string) && newGrant["privilege"].(string) == oldGrant["privilege"].(string)) + } + + if !exists { + grantPrivilegeOn(conn, newGrant["privilege"].(string), newGrant["database"].(string), name) + } + } + } + + return readUser(d, meta) 
+} + +func deleteUser(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*client.Client) + name := d.Get("name").(string) + + queryStr := fmt.Sprintf("DROP USER %s", name) + query := client.Query{ + Command: queryStr, + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + if resp.Err != nil { + return resp.Err + } + + d.SetId("") + + return nil +} diff --git a/builtin/providers/influxdb/resource_user_test.go b/builtin/providers/influxdb/resource_user_test.go new file mode 100644 index 000000000..587796e79 --- /dev/null +++ b/builtin/providers/influxdb/resource_user_test.go @@ -0,0 +1,349 @@ +package influxdb + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/influxdata/influxdb/client" +) + +func TestAccInfluxDBUser_admin(t *testing.T) { + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccUserConfig_admin, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserExists("influxdb_user.test"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "true", + ), + ), + }, + resource.TestStep{ + Config: testAccUserConfig_revoke, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserExists("influxdb_user.test"), + testAccCheckUserNoAdmin("influxdb_user.test"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "false", + ), + ), + }, + }, + }) +} + +func TestAccInfluxDBUser_grant(t *testing.T) { + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccUserConfig_grant, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserExists("influxdb_user.test"), + testAccCheckUserGrants("influxdb_user.test", "terraform-green", "READ"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "false", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "grant.#", "1", + ), + ), + }, + resource.TestStep{ + Config: testAccUserConfig_grantUpdate, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserGrants("influxdb_user.test", "terraform-green", "WRITE"), + testAccCheckUserGrants("influxdb_user.test", "terraform-blue", "READ"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "false", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "grant.#", "2", + ), + ), + }, + }, + }) +} + +func TestAccInfluxDBUser_revoke(t *testing.T) { + resource.Test(t, resource.TestCase{ + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccUserConfig_grant, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserExists("influxdb_user.test"), + testAccCheckUserGrants("influxdb_user.test", "terraform-green", 
"READ"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "false", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "grant.#", "1", + ), + ), + }, + resource.TestStep{ + Config: testAccUserConfig_revoke, + Check: resource.ComposeTestCheckFunc( + testAccCheckUserGrantsEmpty("influxdb_user.test"), + resource.TestCheckResourceAttr( + "influxdb_user.test", "name", "terraform_test", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "password", "terraform", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "admin", "false", + ), + resource.TestCheckResourceAttr( + "influxdb_user.test", "grant.#", "0", + ), + ), + }, + }, + }) +} + +func testAccCheckUserExists(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No user id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := client.Query{ + Command: "SHOW USERS", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, result := range resp.Results[0].Series[0].Values { + if result[0] == rs.Primary.Attributes["name"] { + return nil + } + } + + return fmt.Errorf("User %q does not exist", rs.Primary.Attributes["name"]) + } +} + +func testAccCheckUserNoAdmin(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No user id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := client.Query{ + Command: "SHOW USERS", + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, result := range resp.Results[0].Series[0].Values { + if result[0] == rs.Primary.Attributes["name"] { + if result[1].(bool) == true { + return fmt.Errorf("User %q is admin", rs.Primary.ID) + } + + return nil + } + } + + return fmt.Errorf("User %q does not exist", rs.Primary.Attributes["name"]) + } +} + +func testAccCheckUserGrantsEmpty(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No user id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := client.Query{ + Command: fmt.Sprintf("SHOW GRANTS FOR %s", rs.Primary.Attributes["name"]), + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, result := range resp.Results[0].Series[0].Values { + if result[1].(string) != "NO PRIVILEGES" { + return fmt.Errorf("User %q still has grants: %#v", rs.Primary.ID, resp.Results[0].Series[0].Values) + } + } + + return nil + } +} + +func testAccCheckUserGrants(n, database, privilege string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No user id set") + } + + conn := testAccProvider.Meta().(*client.Client) + + query := 
client.Query{ + Command: fmt.Sprintf("SHOW GRANTS FOR %s", rs.Primary.Attributes["name"]), + } + + resp, err := conn.Query(query) + if err != nil { + return err + } + + if resp.Err != nil { + return resp.Err + } + + for _, result := range resp.Results[0].Series[0].Values { + if result[0].(string) == database && result[1].(string) == privilege { + return nil + } + } + + return fmt.Errorf("Privilege %q on %q for %q does not exist", privilege, database, rs.Primary.Attributes["name"]) + } +} + +var testAccUserConfig_admin = ` +resource "influxdb_user" "test" { + name = "terraform_test" + password = "terraform" + admin = true +} +` + +var testAccUserConfig_grant = ` +resource "influxdb_database" "green" { + name = "terraform-green" +} + +resource "influxdb_user" "test" { + name = "terraform_test" + password = "terraform" + + grant { + database = "${influxdb_database.green.name}" + privilege = "read" + } +} +` + +var testAccUserConfig_revoke = ` +resource "influxdb_database" "green" { + name = "terraform-green" +} + +resource "influxdb_user" "test" { + name = "terraform_test" + password = "terraform" + admin = false +} +` + +var testAccUserConfig_grantUpdate = ` +resource "influxdb_database" "green" { + name = "terraform-green" +} + +resource "influxdb_database" "blue" { + name = "terraform-blue" +} + +resource "influxdb_user" "test" { + name = "terraform_test" + password = "terraform" + + grant { + database = "${influxdb_database.green.name}" + privilege = "write" + } + + grant { + database = "${influxdb_database.blue.name}" + privilege = "read" + } +} +` diff --git a/website/source/docs/providers/influxdb/index.html.markdown b/website/source/docs/providers/influxdb/index.html.markdown index bf8f49c98..315c8cb0a 100644 --- a/website/source/docs/providers/influxdb/index.html.markdown +++ b/website/source/docs/providers/influxdb/index.html.markdown @@ -37,4 +37,15 @@ provider "influxdb" { resource "influxdb_database" "metrics" { name = "awesome_app" } + +resource "influxdb_continuous_query" "minnie" { + name = "minnie" + database = "${influxdb_database.metrics.name}" + query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)" +} + +resource "influxdb_user" "paul" { + name = "paul" + password = "super-secret" +} ``` diff --git a/website/source/docs/providers/influxdb/r/continuous_query.html.md b/website/source/docs/providers/influxdb/r/continuous_query.html.md new file mode 100644 index 000000000..1f62af799 --- /dev/null +++ b/website/source/docs/providers/influxdb/r/continuous_query.html.md @@ -0,0 +1,38 @@ +--- +layout: "influxdb" +page_title: "InfluxDB: influxdb_continuous_query" +sidebar_current: "docs-influxdb-resource-continuous_query" +description: |- + The influxdb_continuous_query resource allows an InfluxDB continuous query to be managed. +--- + +# influxdb\_continuous\_query + +The continuous_query resource allows a continuous query to be created on an InfluxDB server. + +## Example Usage + +``` +resource "influxdb_database" "test" { + name = "terraform-test" +} + +resource "influxdb_continuous_query" "minnie" { + name = "minnie" + database = "${influxdb_database.test.name}" + query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)" +} + +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name for the continuous_query. This must be unique on the InfluxDB server. +* `database` - (Required) The database for the continuous_query. This must be an existing influxdb database. 
+* `query` - (Required) The query for the continuous_query. + +## Attributes Reference + +This resource exports no further attributes. diff --git a/website/source/docs/providers/influxdb/r/user.html.md b/website/source/docs/providers/influxdb/r/user.html.md new file mode 100644 index 000000000..6ff0397e4 --- /dev/null +++ b/website/source/docs/providers/influxdb/r/user.html.md @@ -0,0 +1,47 @@ +--- +layout: "influxdb" +page_title: "InfluxDB: influxdb_user" +sidebar_current: "docs-influxdb-resource-user" +description: |- + The influxdb_user resource allows an InfluxDB users to be managed. +--- + +# influxdb\_user + +The user resource allows a user to be created on an InfluxDB server. + +## Example Usage + +``` +resource "influxdb_database" "green" { + name = "terraform-green" +} + +resource "influxdb_user" "paul" { + name = "paul" + password = "super-secret" + + grant { + database = "${influxdb_database.green.name}" + privilege = "write" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name for the user. +* `password` - (Required) The password for the user. +* `admin` - (Optional) Mark the user as admin. +* `grant` - (Optional) A list of grants for non-admin users + +Each `grant` supports the following: + +* `database` - (Required) The name of the database the privilege is associated with +* `privilege` - (Required) The privilege to grant (READ|WRITE|ALL) + +## Attributes Reference + +* `admin` - (Bool) indication if the user is an admin or not. diff --git a/website/source/layouts/influxdb.erb b/website/source/layouts/influxdb.erb index 24a764e9f..5044dc903 100644 --- a/website/source/layouts/influxdb.erb +++ b/website/source/layouts/influxdb.erb @@ -14,7 +14,13 @@ Resources From 744b266995d9a4be137148f50f84abfcdc7cf076 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 17:59:15 +1000 Subject: [PATCH 0546/1238] provider/aws: Support `aws_elasticsearch_domain` upgrades to (#7860) `elasticsearch_version` 2.3 Fixes #7836 This will allow ElasticSearch domains to be deployed with version 2.3 of ElasticSearch The other slight modifications are to stop dereferencing values before passing to d.Set in the Read func. It is safer to pass the pointer to d.Set and allow that to dereference if there is a value ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSElasticSearchDomain_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSElasticSearchDomain_ -timeout 120m === RUN TestAccAWSElasticSearchDomain_basic --- PASS: TestAccAWSElasticSearchDomain_basic (1611.74s) === RUN TestAccAWSElasticSearchDomain_v23 --- PASS: TestAccAWSElasticSearchDomain_v23 (1898.80s) === RUN TestAccAWSElasticSearchDomain_complex --- PASS: TestAccAWSElasticSearchDomain_complex (1802.44s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 5313.006s ``` Update resource_aws_elasticsearch_domain.go --- .../aws/resource_aws_elasticsearch_domain.go | 17 +++-- .../resource_aws_elasticsearch_domain_test.go | 68 +++++++++++++++---- .../aws/r/elasticsearch_domain.html.markdown | 2 + 3 files changed, 70 insertions(+), 17 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain.go b/builtin/providers/aws/resource_aws_elasticsearch_domain.go index 35bffc89a..b7ba0a843 100644 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain.go +++ b/builtin/providers/aws/resource_aws_elasticsearch_domain.go @@ -129,6 +129,13 @@ func resourceAwsElasticSearchDomain() *schema.Resource { }, }, }, + "elasticsearch_version": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "1.5", + ForceNew: true, + }, + "tags": tagsSchema(), }, } @@ -138,7 +145,8 @@ func resourceAwsElasticSearchDomainCreate(d *schema.ResourceData, meta interface conn := meta.(*AWSClient).esconn input := elasticsearch.CreateElasticsearchDomainInput{ - DomainName: aws.String(d.Get("domain_name").(string)), + DomainName: aws.String(d.Get("domain_name").(string)), + ElasticsearchVersion: aws.String(d.Get("elasticsearch_version").(string)), } if v, ok := d.GetOk("access_policies"); ok { @@ -262,8 +270,9 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{} if err != nil { return err } - d.Set("domain_id", *ds.DomainId) - d.Set("domain_name", *ds.DomainName) + d.Set("domain_id", ds.DomainId) + d.Set("domain_name", ds.DomainName) + d.Set("elasticsearch_version", ds.ElasticsearchVersion) if ds.Endpoint != nil { d.Set("endpoint", *ds.Endpoint) } @@ -282,7 +291,7 @@ func resourceAwsElasticSearchDomainRead(d *schema.ResourceData, meta interface{} }) } - d.Set("arn", *ds.ARN) + d.Set("arn", ds.ARN) listOut, err := conn.ListTags(&elasticsearch.ListTagsInput{ ARN: ds.ARN, diff --git a/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go index 881d92322..85dd37289 100644 --- a/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go +++ b/builtin/providers/aws/resource_aws_elasticsearch_domain_test.go @@ -7,12 +7,14 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" elasticsearch "github.com/aws/aws-sdk-go/service/elasticsearchservice" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) func TestAccAWSElasticSearchDomain_basic(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus + ri := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -20,9 +22,32 @@ func TestAccAWSElasticSearchDomain_basic(t *testing.T) { CheckDestroy: testAccCheckESDomainDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccESDomainConfig, + Config: testAccESDomainConfig(ri), Check: resource.ComposeTestCheckFunc( testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), 
+ resource.TestCheckResourceAttr( + "aws_elasticsearch_domain.example", "elasticsearch_version", "1.5"), + ), + }, + }, + }) +} + +func TestAccAWSElasticSearchDomain_v23(t *testing.T) { + var domain elasticsearch.ElasticsearchDomainStatus + ri := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckESDomainDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccESDomainConfigV23(ri), + Check: resource.ComposeTestCheckFunc( + testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), + resource.TestCheckResourceAttr( + "aws_elasticsearch_domain.example", "elasticsearch_version", "2.3"), ), }, }, @@ -31,6 +56,7 @@ func TestAccAWSElasticSearchDomain_basic(t *testing.T) { func TestAccAWSElasticSearchDomain_complex(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus + ri := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -38,7 +64,7 @@ func TestAccAWSElasticSearchDomain_complex(t *testing.T) { CheckDestroy: testAccCheckESDomainDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccESDomainConfig_complex, + Config: testAccESDomainConfig_complex(ri), Check: resource.ComposeTestCheckFunc( testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), ), @@ -50,6 +76,7 @@ func TestAccAWSElasticSearchDomain_complex(t *testing.T) { func TestAccAWSElasticSearch_tags(t *testing.T) { var domain elasticsearch.ElasticsearchDomainStatus var td elasticsearch.ListTagsOutput + ri := acctest.RandInt() resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -57,14 +84,14 @@ func TestAccAWSElasticSearch_tags(t *testing.T) { CheckDestroy: testAccCheckAWSELBDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccESDomainConfig, + Config: testAccESDomainConfig(ri), Check: resource.ComposeTestCheckFunc( testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), ), }, resource.TestStep{ - Config: testAccESDomainConfig_TagUpdate, + Config: testAccESDomainConfig_TagUpdate(ri), Check: resource.ComposeTestCheckFunc( testAccCheckESDomainExists("aws_elasticsearch_domain.example", &domain), testAccLoadESTags(&domain, &td), @@ -144,26 +171,31 @@ func testAccCheckESDomainDestroy(s *terraform.State) error { return nil } -const testAccESDomainConfig = ` +func testAccESDomainConfig(randInt int) string { + return fmt.Sprintf(` resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-1" + domain_name = "tf-test-%d" +} +`, randInt) } -` -const testAccESDomainConfig_TagUpdate = ` +func testAccESDomainConfig_TagUpdate(randInt int) string { + return fmt.Sprintf(` resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-1" + domain_name = "tf-test-%d" tags { foo = "bar" new = "type" } } -` +`, randInt) +} -const testAccESDomainConfig_complex = ` +func testAccESDomainConfig_complex(randInt int) string { + return fmt.Sprintf(` resource "aws_elasticsearch_domain" "example" { - domain_name = "tf-test-2" + domain_name = "tf-test-%d" advanced_options { "indices.fielddata.cache.size" = 80 @@ -186,4 +218,14 @@ resource "aws_elasticsearch_domain" "example" { bar = "complex" } } -` +`, randInt) +} + +func testAccESDomainConfigV23(randInt int) string { + return fmt.Sprintf(` +resource "aws_elasticsearch_domain" "example" { + domain_name = "tf-test-%d" + elasticsearch_version = "2.3" +} +`, randInt) +} diff --git 
a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown index dac78a87c..6dd083f0f 100644 --- a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown +++ b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown @@ -14,6 +14,7 @@ description: |- ``` resource "aws_elasticsearch_domain" "es" { domain_name = "tf-test" + elasticsearch_version = "1.5" advanced_options { "rest.action.multi.allow_explicit_index" = true } @@ -54,6 +55,7 @@ The following arguments are supported: * `ebs_options` - (Optional) EBS related options, see below. * `cluster_config` - (Optional) Cluster configuration of the domain, see below. * `snapshot_options` - (Optional) Snapshot related options, see below. +* `elasticsearch_version` - (Optional) The version of ElasticSearch to deploy. Only valid values are `1.5` and `2.3`. Defaults to `1.5` * `tags` - (Optional) A mapping of tags to assign to the resource **ebs_options** supports the following attributes: From fa2d6e35a705a9c6a92a6b58393f359106b93bba Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 5 Aug 2016 09:01:41 +0100 Subject: [PATCH 0547/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6465ec47b..ffb5ee77f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES: IMPROVEMENTS: * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] + * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) [GH-7860] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 6899246b98d2b103a7eeaeb1608ad814aef80188 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 5 Aug 2016 18:44:10 +1000 Subject: [PATCH 0548/1238] provider/aws: Updates `aws_cloudformation_stack` Update timeout (#7997) Fixes #7996 The Create func was using the timeout that we were passing to the resource. Update func was not. ``` % make testacc TEST=./builtin/providers/aws % TESTARGS='-run=TestAccAWSCloudFormation_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSCloudFormation_ -timeout 120m === RUN TestAccAWSCloudFormation_basic --- PASS: TestAccAWSCloudFormation_basic (120.61s) === RUN TestAccAWSCloudFormation_defaultParams --- PASS: TestAccAWSCloudFormation_defaultParams (121.40s) === RUN TestAccAWSCloudFormation_allAttributes --- PASS: TestAccAWSCloudFormation_allAttributes (263.29s) === RUN TestAccAWSCloudFormation_withParams --- PASS: TestAccAWSCloudFormation_withParams (205.52s) === RUN TestAccAWSCloudFormation_withUrl_withParams --- PASS: TestAccAWSCloudFormation_withUrl_withParams (402.71s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1113.552s ``` --- .../providers/aws/resource_aws_cloudformation_stack.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_cloudformation_stack.go b/builtin/providers/aws/resource_aws_cloudformation_stack.go index 28935e33c..56249587b 100644 --- a/builtin/providers/aws/resource_aws_cloudformation_stack.go +++ b/builtin/providers/aws/resource_aws_cloudformation_stack.go @@ -268,6 +268,7 @@ func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{} } func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error { + retryTimeout := int64(30) conn := meta.(*AWSClient).cfconn input := &cloudformation.UpdateStackInput{ @@ -314,6 +315,13 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface return err } + if v, ok := d.GetOk("timeout_in_minutes"); ok { + m := int64(v.(int)) + if m > retryTimeout { + retryTimeout = m + 5 + log.Printf("[DEBUG] CloudFormation timeout: %d", retryTimeout) + } + } wait := resource.StateChangeConf{ Pending: []string{ "UPDATE_COMPLETE_CLEANUP_IN_PROGRESS", @@ -323,7 +331,7 @@ func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface "UPDATE_ROLLBACK_COMPLETE", }, Target: []string{"UPDATE_COMPLETE"}, - Timeout: 15 * time.Minute, + Timeout: time.Duration(retryTimeout) * time.Minute, MinTimeout: 5 * time.Second, Refresh: func() (interface{}, string, error) { resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{ From 4c6e0d94f9ed22a192064b420d25a95f110ec3ba Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 5 Aug 2016 09:46:44 +0100 Subject: [PATCH 0549/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ffb5ee77f..c9f3d694d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS: BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] + * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation [GH-7997] * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` [GH-7995] * provider/aws: Retry association of IAM Role & instance profile [GH-7938] From c990e386015a07cb679ef99e387dd7a278c89b0d Mon Sep 17 00:00:00 2001 From: Simon Westcott Date: Fri, 5 Aug 2016 14:01:28 +0100 Subject: [PATCH 0550/1238] Correct AWS secret key variable name --- .github/CONTRIBUTING.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 8720a0888..29b8554fa 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -373,7 +373,7 @@ to a single 
resource. Most tests follow a similar structure. 1. Pre-flight checks are made to ensure that sufficient provider configuration is available to be able to proceed - for example in an acceptance test - targetting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_KEY` must be set prior + targetting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` must be set prior to running acceptance tests. This is common to all tests exercising a single provider. From 0a3714eaac8d785ef22f3545c059adcb2b86ae98 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 5 Aug 2016 11:38:10 -0400 Subject: [PATCH 0551/1238] Don't send access_token in request params Always send the access_token in the X-Atlas-Token header. --- state/remote/atlas.go | 6 +++++- state/remote/atlas_test.go | 11 +++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/state/remote/atlas.go b/state/remote/atlas.go index 24e81f177..5343c0236 100644 --- a/state/remote/atlas.go +++ b/state/remote/atlas.go @@ -23,6 +23,7 @@ import ( const ( // defaultAtlasServer is used when no address is given defaultAtlasServer = "https://atlas.hashicorp.com/" + atlasTokenHeader = "X-Atlas-Token" ) func atlasFactory(conf map[string]string) (Client, error) { @@ -92,6 +93,8 @@ func (c *AtlasClient) Get() (*Payload, error) { return nil, fmt.Errorf("Failed to make HTTP request: %v", err) } + req.Header.Set(atlasTokenHeader, c.AccessToken) + // Request the url client, err := c.http() if err != nil { @@ -170,6 +173,7 @@ func (c *AtlasClient) Put(state []byte) error { } // Prepare the request + req.Header.Set(atlasTokenHeader, c.AccessToken) req.Header.Set("Content-MD5", b64) req.Header.Set("Content-Type", "application/json") req.ContentLength = int64(len(state)) @@ -204,6 +208,7 @@ func (c *AtlasClient) Delete() error { if err != nil { return fmt.Errorf("Failed to make HTTP request: %v", err) } + req.Header.Set(atlasTokenHeader, c.AccessToken) // Make the request client, err := c.http() @@ -249,7 +254,6 @@ func (c *AtlasClient) url() *url.URL { values := url.Values{} values.Add("atlas_run_id", c.RunId) - values.Add("access_token", c.AccessToken) return &url.URL{ Scheme: c.ServerURL.Scheme, diff --git a/state/remote/atlas_test.go b/state/remote/atlas_test.go index 1d73540a4..9d4f226fe 100644 --- a/state/remote/atlas_test.go +++ b/state/remote/atlas_test.go @@ -218,6 +218,17 @@ func (f *fakeAtlas) NoConflictAllowed(b bool) { } func (f *fakeAtlas) handler(resp http.ResponseWriter, req *http.Request) { + // access tokens should only be sent as a header + if req.FormValue("access_token") != "" { + http.Error(resp, "access_token in request params", http.StatusBadRequest) + return + } + + if req.Header.Get(atlasTokenHeader) == "" { + http.Error(resp, "missing access token", http.StatusBadRequest) + return + } + switch req.Method { case "GET": // Respond with the current stored state. From e21203a36973ef3dd7ec47b8202da6ea96a3e1f0 Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Fri, 5 Aug 2016 17:08:22 +0200 Subject: [PATCH 0552/1238] Changed sync token to int and use it as id. 
--- .../providers/aws/data_source_aws_ip_ranges.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/data_source_aws_ip_ranges.go b/builtin/providers/aws/data_source_aws_ip_ranges.go index c530d981d..c71eb0a72 100644 --- a/builtin/providers/aws/data_source_aws_ip_ranges.go +++ b/builtin/providers/aws/data_source_aws_ip_ranges.go @@ -6,8 +6,8 @@ import ( "io/ioutil" "log" "sort" + "strconv" "strings" - "time" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/terraform/helper/schema" @@ -50,7 +50,7 @@ func dataSourceAwsIPRanges() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, }, "sync_token": &schema.Schema{ - Type: schema.TypeString, + Type: schema.TypeInt, Computed: true, }, }, @@ -62,7 +62,6 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { conn := cleanhttp.DefaultClient() log.Printf("[DEBUG] Reading IP ranges") - d.SetId(time.Now().UTC().String()) res, err := conn.Get("https://ip-ranges.amazonaws.com/ip-ranges.json") @@ -88,7 +87,15 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { return fmt.Errorf("Error setting create date: %s", err) } - if err := d.Set("sync_token", result.SyncToken); err != nil { + syncToken, err := strconv.Atoi(result.SyncToken) + + if err != nil { + return fmt.Errorf("Error while converting sync token: %s", err) + } + + d.SetId(result.SyncToken) + + if err := d.Set("sync_token", syncToken); err != nil { return fmt.Errorf("Error setting sync token: %s", err) } From 67b4b4cbfb453ceba45d39e9c43e187448843d98 Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Fri, 5 Aug 2016 17:08:50 +0200 Subject: [PATCH 0553/1238] Use content as id. --- builtin/providers/fastly/data_source_ip_ranges.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/builtin/providers/fastly/data_source_ip_ranges.go b/builtin/providers/fastly/data_source_ip_ranges.go index bc01cd232..080ac081b 100644 --- a/builtin/providers/fastly/data_source_ip_ranges.go +++ b/builtin/providers/fastly/data_source_ip_ranges.go @@ -6,9 +6,10 @@ import ( "io/ioutil" "log" "sort" - "time" + "strconv" "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/terraform/helper/hashcode" "github.com/hashicorp/terraform/helper/schema" ) @@ -35,7 +36,6 @@ func dataSourceFastlyIPRangesRead(d *schema.ResourceData, meta interface{}) erro conn := cleanhttp.DefaultClient() log.Printf("[DEBUG] Reading IP ranges") - d.SetId(time.Now().UTC().String()) res, err := conn.Get("https://api.fastly.com/public-ip-list") @@ -51,6 +51,8 @@ func dataSourceFastlyIPRangesRead(d *schema.ResourceData, meta interface{}) erro return fmt.Errorf("Error reading response body: %s", err) } + d.SetId(strconv.Itoa(hashcode.String(string(data)))) + result := new(dataSourceFastlyIPRangesResult) if err := json.Unmarshal(data, result); err != nil { From 2caae49a3b773217e8efff95fcf2345031e947dc Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Fri, 5 Aug 2016 17:09:12 +0200 Subject: [PATCH 0554/1238] Fail if filter yields no results. 
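Illustration (editor's sketch, not part of the patch): a filter combination that matches no published CIDR blocks now aborts the run with an error instead of silently setting an empty list. For example, Route 53 ranges are typically published under the `global` region, so pinning them to a specific region would usually match nothing:

```hcl
# Hypothetical non-matching combination; expected to fail after this change.
data "aws_ip_ranges" "no_match" {
  regions  = ["eu-central-1"]
  services = ["route53"]
}
```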
--- builtin/providers/aws/data_source_aws_ip_ranges.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/data_source_aws_ip_ranges.go b/builtin/providers/aws/data_source_aws_ip_ranges.go index c71eb0a72..b03fc3632 100644 --- a/builtin/providers/aws/data_source_aws_ip_ranges.go +++ b/builtin/providers/aws/data_source_aws_ip_ranges.go @@ -121,7 +121,7 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { } if len(prefixes) == 0 { - log.Printf("[WARN] No ip ranges result from filters") + return fmt.Errorf(" No IP ranges result from filters") } sort.Strings(prefixes) From 9f565285d1eb281ca00b62ca17c8ad4e3e69db77 Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Fri, 5 Aug 2016 17:12:28 +0200 Subject: [PATCH 0555/1238] Renamed blocks to cidr_blocks. --- builtin/providers/aws/data_source_aws_ip_ranges.go | 4 ++-- builtin/providers/fastly/data_source_ip_ranges.go | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/data_source_aws_ip_ranges.go b/builtin/providers/aws/data_source_aws_ip_ranges.go index b03fc3632..799af0ca7 100644 --- a/builtin/providers/aws/data_source_aws_ip_ranges.go +++ b/builtin/providers/aws/data_source_aws_ip_ranges.go @@ -30,7 +30,7 @@ func dataSourceAwsIPRanges() *schema.Resource { Read: dataSourceAwsIPRangesRead, Schema: map[string]*schema.Schema{ - "blocks": &schema.Schema{ + "cidr_blocks": &schema.Schema{ Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -126,7 +126,7 @@ func dataSourceAwsIPRangesRead(d *schema.ResourceData, meta interface{}) error { sort.Strings(prefixes) - if err := d.Set("blocks", prefixes); err != nil { + if err := d.Set("cidr_blocks", prefixes); err != nil { return fmt.Errorf("Error setting ip ranges: %s", err) } diff --git a/builtin/providers/fastly/data_source_ip_ranges.go b/builtin/providers/fastly/data_source_ip_ranges.go index 080ac081b..cc418465c 100644 --- a/builtin/providers/fastly/data_source_ip_ranges.go +++ b/builtin/providers/fastly/data_source_ip_ranges.go @@ -22,7 +22,7 @@ func dataSourceFastlyIPRanges() *schema.Resource { Read: dataSourceFastlyIPRangesRead, Schema: map[string]*schema.Schema{ - "blocks": &schema.Schema{ + "cidr_blocks": &schema.Schema{ Type: schema.TypeList, Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, @@ -61,7 +61,7 @@ func dataSourceFastlyIPRangesRead(d *schema.ResourceData, meta interface{}) erro sort.Strings(result.Addresses) - if err := d.Set("blocks", result.Addresses); err != nil { + if err := d.Set("cidr_blocks", result.Addresses); err != nil { return fmt.Errorf("Error setting ip ranges: %s", err) } From 67bf13fccf21d7c4411b69761d58fd53c222631d Mon Sep 17 00:00:00 2001 From: Joern Barthel Date: Fri, 5 Aug 2016 17:12:46 +0200 Subject: [PATCH 0556/1238] Added documentation. 
--- .../providers/aws/d/ip_ranges.html.markdown | 59 +++++++++++++++++++ .../fastly/d/ip_ranges.html.markdown | 37 ++++++++++++ website/source/layouts/aws.erb | 3 + website/source/layouts/fastly.erb | 9 +++ 4 files changed, 108 insertions(+) create mode 100644 website/source/docs/providers/aws/d/ip_ranges.html.markdown create mode 100644 website/source/docs/providers/fastly/d/ip_ranges.html.markdown diff --git a/website/source/docs/providers/aws/d/ip_ranges.html.markdown b/website/source/docs/providers/aws/d/ip_ranges.html.markdown new file mode 100644 index 000000000..f7fe90a8c --- /dev/null +++ b/website/source/docs/providers/aws/d/ip_ranges.html.markdown @@ -0,0 +1,59 @@ +--- +layout: "aws" +page_title: "AWS: aws_ip_ranges" +sidebar_current: "docs-aws-datasource-ip_ranges" +description: |- + Get information on AWS IP ranges. +--- + +# aws\_ip_ranges + +Use this data source to get the [IP ranges][1] of various AWS products and services. + +## Example Usage + +``` +data "aws_ip_ranges" "european_ec2" { + regions = [ "eu-west-1", "eu-central-1" ] + services = [ "ec2" ] +} + +resource "aws_security_group" "from_europe" { + + name = "from_europe" + + ingress { + from_port = "443" + to_port = "443" + protocol = "tcp" + cidr_blocks = [ "${data.aws_ip_ranges.european_ec2.blocks}" ] + } + + tags { + CreateDate = "${data.aws_ip_ranges.european_ec2.create_date}" + SyncToken = "${data.aws_ip_ranges.european_ec2.sync_token}" + } + +} +``` + +## Argument Reference + +* `regions` - (Optional) Filter IP ranges by regions (or include all regions, if +omitted). Valid items are `global` (for `cloudfront`) as well as all AWS regions +(e.g. `eu-central-1`) + +* `services` - (Required) Filter IP ranges by services. Valid items are `amazon` +(for amazon.com), `cloudfront`, `ec2`, `route53` and `route53_healthchecks`. + +~> **NOTE:** If the specified combination of regions and services does not yield any +CIDR blocks, Terraform will fail. + +## Attributes Reference + +* `cidr_blocks` - The lexically ordered list of CIDR blocks. +* `create_date` - The publication time of the IP ranges (e.g. `2016-08-03-23-46-05`). +* `sync_token` - The publication time of the IP ranges, in Unix epoch time format + (e.g. `1470267965`). + +[1]: http://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html diff --git a/website/source/docs/providers/fastly/d/ip_ranges.html.markdown b/website/source/docs/providers/fastly/d/ip_ranges.html.markdown new file mode 100644 index 000000000..75cd59b00 --- /dev/null +++ b/website/source/docs/providers/fastly/d/ip_ranges.html.markdown @@ -0,0 +1,37 @@ +--- +layout: "fastly" +page_title: "Fastly: fastly_ip_ranges" +sidebar_current: "docs-fastly-datasource-ip_ranges" +description: |- + Get information on Fastly IP ranges. +--- + +# fastly\_ip_ranges + +Use this data source to get the [IP ranges][1] of Fastly edge nodes. + +## Example Usage + +``` +data "fastly_ip_ranges" "fastly" { +} + +resource "aws_security_group" "from_fastly" { + + name = "from_fastly" + + ingress { + from_port = "443" + to_port = "443" + protocol = "tcp" + cidr_blocks = [ "${data.fastly_ip_ranges.fastly.cidr_blocks}" ] + } + +} +``` + +## Attributes Reference + +* `cidr_blocks` - The lexically ordered list of CIDR blocks. 
+ +[1]: https://docs.fastly.com/guides/securing-communications/accessing-fastlys-ip-ranges diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index c4765aafe..ed7e12a0d 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -25,6 +25,9 @@ > aws_iam_policy_document + > + aws_ip_ranges + > aws_s3_bucket_object diff --git a/website/source/layouts/fastly.erb b/website/source/layouts/fastly.erb index 1958464a0..b1cd59d34 100644 --- a/website/source/layouts/fastly.erb +++ b/website/source/layouts/fastly.erb @@ -10,6 +10,15 @@ Fastly Provider + > + Data Sources + + + > Resources From 9c54e9c955104863e9de7266ea383795f6dd834b Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Sun, 7 Aug 2016 08:29:51 +0900 Subject: [PATCH 0557/1238] Add aws_vpn_gateway_attachment resource. (#7870) This commit adds VPN Gateway attachment resource, and also an initial tests and documentation stubs. Signed-off-by: Krzysztof Wilczynski --- builtin/providers/aws/provider.go | 1 + .../providers/aws/resource_aws_vpn_gateway.go | 21 +- .../resource_aws_vpn_gateway_attachment.go | 210 ++++++++++++++++++ ...esource_aws_vpn_gateway_attachment_test.go | 163 ++++++++++++++ .../aws/resource_aws_vpn_gateway_test.go | 124 ++++++++--- .../r/vpn_gateway_attachment.html.markdown | 57 +++++ website/source/layouts/aws.erb | 4 + 7 files changed, 540 insertions(+), 40 deletions(-) create mode 100644 builtin/providers/aws/resource_aws_vpn_gateway_attachment.go create mode 100644 builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go create mode 100644 website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 69e264dd9..234a9c90e 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -282,6 +282,7 @@ func Provider() terraform.ResourceProvider { "aws_vpn_connection": resourceAwsVpnConnection(), "aws_vpn_connection_route": resourceAwsVpnConnectionRoute(), "aws_vpn_gateway": resourceAwsVpnGateway(), + "aws_vpn_gateway_attachment": resourceAwsVpnGatewayAttachment(), }, ConfigureFunc: providerConfigure, } diff --git a/builtin/providers/aws/resource_aws_vpn_gateway.go b/builtin/providers/aws/resource_aws_vpn_gateway.go index 27f4a45f7..845e11cd3 100644 --- a/builtin/providers/aws/resource_aws_vpn_gateway.go +++ b/builtin/providers/aws/resource_aws_vpn_gateway.go @@ -32,6 +32,7 @@ func resourceAwsVpnGateway() *schema.Resource { "vpc_id": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, }, "tags": tagsSchema(), @@ -80,17 +81,18 @@ func resourceAwsVpnGatewayRead(d *schema.ResourceData, meta interface{}) error { } vpnGateway := resp.VpnGateways[0] - if vpnGateway == nil { + if vpnGateway == nil || *vpnGateway.State == "deleted" { // Seems we have lost our VPN gateway d.SetId("") return nil } - if len(vpnGateway.VpcAttachments) == 0 || *vpnGateway.VpcAttachments[0].State == "detached" || *vpnGateway.VpcAttachments[0].State == "deleted" { + vpnAttachment := vpnGatewayGetAttachment(vpnGateway) + if len(vpnGateway.VpcAttachments) == 0 || *vpnAttachment.State == "detached" { // Gateway exists but not attached to the VPC d.Set("vpc_id", "") } else { - d.Set("vpc_id", vpnGateway.VpcAttachments[0].VpcId) + d.Set("vpc_id", *vpnAttachment.VpcId) } d.Set("availability_zone", vpnGateway.AvailabilityZone) d.Set("tags", tagsToMap(vpnGateway.Tags)) @@ -301,12 +303,21 @@ func vpnGatewayAttachStateRefreshFunc(conn *ec2.EC2, id 
string, expected string) } vpnGateway := resp.VpnGateways[0] - if len(vpnGateway.VpcAttachments) == 0 { // No attachments, we're detached return vpnGateway, "detached", nil } - return vpnGateway, *vpnGateway.VpcAttachments[0].State, nil + vpnAttachment := vpnGatewayGetAttachment(vpnGateway) + return vpnGateway, *vpnAttachment.State, nil } } + +func vpnGatewayGetAttachment(vgw *ec2.VpnGateway) *ec2.VpcAttachment { + for _, v := range vgw.VpcAttachments { + if *v.State == "attached" { + return v + } + } + return &ec2.VpcAttachment{State: aws.String("detached")} +} diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go b/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go new file mode 100644 index 000000000..b19393bfb --- /dev/null +++ b/builtin/providers/aws/resource_aws_vpn_gateway_attachment.go @@ -0,0 +1,210 @@ +package aws + +import ( + "fmt" + "log" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsVpnGatewayAttachment() *schema.Resource { + return &schema.Resource{ + Create: resourceAwsVpnGatewayAttachmentCreate, + Read: resourceAwsVpnGatewayAttachmentRead, + Delete: resourceAwsVpnGatewayAttachmentDelete, + + Schema: map[string]*schema.Schema{ + "vpc_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "vpn_gateway_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceAwsVpnGatewayAttachmentCreate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vpcId := d.Get("vpc_id").(string) + vgwId := d.Get("vpn_gateway_id").(string) + + createOpts := &ec2.AttachVpnGatewayInput{ + VpcId: aws.String(vpcId), + VpnGatewayId: aws.String(vgwId), + } + log.Printf("[DEBUG] VPN Gateway attachment options: %#v", *createOpts) + + _, err := conn.AttachVpnGateway(createOpts) + if err != nil { + return fmt.Errorf("Error attaching VPN Gateway %q to VPC %q: %s", + vgwId, vpcId, err) + } + + d.SetId(vpnGatewayAttachmentId(vpcId, vgwId)) + log.Printf("[INFO] VPN Gateway %q attachment ID: %s", vgwId, d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"detached", "attaching"}, + Target: []string{"attached"}, + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for VPN Gateway %q to attach to VPC %q: %s", + vgwId, vpcId, err) + } + log.Printf("[DEBUG] VPN Gateway %q attached to VPC %q.", vgwId, vpcId) + + return resourceAwsVpnGatewayAttachmentRead(d, meta) +} + +func resourceAwsVpnGatewayAttachmentRead(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vgwId := d.Get("vpn_gateway_id").(string) + + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + VpnGatewayIds: []*string{aws.String(vgwId)}, + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok && awsErr.Code() == "InvalidVPNGatewayID.NotFound" { + log.Printf("[WARN] VPN Gateway %q not found.", vgwId) + d.SetId("") + return nil + } + return err + } + + vgw := resp.VpnGateways[0] + if *vgw.State == "deleted" { + log.Printf("[INFO] VPN Gateway %q appears to have been 
deleted.", vgwId) + d.SetId("") + return nil + } + + vga := vpnGatewayGetAttachment(vgw) + if len(vgw.VpcAttachments) == 0 || *vga.State == "detached" { + d.Set("vpc_id", "") + return nil + } + + d.Set("vpc_id", *vga.VpcId) + return nil +} + +func resourceAwsVpnGatewayAttachmentDelete(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).ec2conn + + vpcId := d.Get("vpc_id").(string) + vgwId := d.Get("vpn_gateway_id").(string) + + if vpcId == "" { + log.Printf("[DEBUG] Not detaching VPN Gateway %q as no VPC ID is set.", vgwId) + return nil + } + + _, err := conn.DetachVpnGateway(&ec2.DetachVpnGatewayInput{ + VpcId: aws.String(vpcId), + VpnGatewayId: aws.String(vgwId), + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok { + switch awsErr.Code() { + case "InvalidVPNGatewayID.NotFound": + log.Printf("[WARN] VPN Gateway %q not found.", vgwId) + d.SetId("") + return nil + case "InvalidVpnGatewayAttachment.NotFound": + log.Printf( + "[WARN] VPN Gateway %q attachment to VPC %q not found.", + vgwId, vpcId) + d.SetId("") + return nil + } + } + + return fmt.Errorf("Error detaching VPN Gateway %q from VPC %q: %s", + vgwId, vpcId, err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"attached", "detaching"}, + Target: []string{"detached"}, + Refresh: vpnGatewayAttachmentStateRefresh(conn, vpcId, vgwId), + Timeout: 5 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for VPN Gateway %q to detach from VPC %q: %s", + vgwId, vpcId, err) + } + log.Printf("[DEBUG] VPN Gateway %q detached from VPC %q.", vgwId, vpcId) + + d.SetId("") + return nil +} + +func vpnGatewayAttachmentStateRefresh(conn *ec2.EC2, vpcId, vgwId string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + Filters: []*ec2.Filter{ + &ec2.Filter{ + Name: aws.String("attachment.vpc-id"), + Values: []*string{aws.String(vpcId)}, + }, + }, + VpnGatewayIds: []*string{aws.String(vgwId)}, + }) + + if err != nil { + awsErr, ok := err.(awserr.Error) + if ok { + switch awsErr.Code() { + case "InvalidVPNGatewayID.NotFound": + fallthrough + case "InvalidVpnGatewayAttachment.NotFound": + return nil, "", nil + } + } + + return nil, "", err + } + + vgw := resp.VpnGateways[0] + if len(vgw.VpcAttachments) == 0 { + return vgw, "detached", nil + } + + vga := vpnGatewayGetAttachment(vgw) + + log.Printf("[DEBUG] VPN Gateway %q attachment status: %s", vgwId, *vga.State) + return vgw, *vga.State, nil + } +} + +func vpnGatewayAttachmentId(vpcId, vgwId string) string { + return fmt.Sprintf("vpn-attachment-%x", hashcode.String(fmt.Sprintf("%s-%s", vpcId, vgwId))) +} diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go new file mode 100644 index 000000000..5f12d6fb8 --- /dev/null +++ b/builtin/providers/aws/resource_aws_vpn_gateway_attachment_test.go @@ -0,0 +1,163 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/ec2" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSVpnGatewayAttachment_basic(t *testing.T) { + var vpc ec2.Vpc + var vgw ec2.VpnGateway + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: 
"aws_vpn_gateway_attachment.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckVpnGatewayAttachmentDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccVpnGatewayAttachmentConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckVpcExists( + "aws_vpc.test", + &vpc), + testAccCheckVpnGatewayExists( + "aws_vpn_gateway.test", + &vgw), + testAccCheckVpnGatewayAttachmentExists( + "aws_vpn_gateway_attachment.test", + &vpc, &vgw), + ), + }, + }, + }) +} + +func TestAccAWSVpnGatewayAttachment_deleted(t *testing.T) { + var vpc ec2.Vpc + var vgw ec2.VpnGateway + + testDeleted := func(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + _, ok := s.RootModule().Resources[n] + if ok { + return fmt.Errorf("Expected VPN Gateway attachment resource %q to be deleted.", n) + } + return nil + } + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + IDRefreshName: "aws_vpn_gateway_attachment.test", + Providers: testAccProviders, + CheckDestroy: testAccCheckVpnGatewayAttachmentDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccVpnGatewayAttachmentConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckVpcExists( + "aws_vpc.test", + &vpc), + testAccCheckVpnGatewayExists( + "aws_vpn_gateway.test", + &vgw), + testAccCheckVpnGatewayAttachmentExists( + "aws_vpn_gateway_attachment.test", + &vpc, &vgw), + ), + }, + resource.TestStep{ + Config: testAccNoVpnGatewayAttachmentConfig, + Check: resource.ComposeTestCheckFunc( + testDeleted("aws_vpn_gateway_attachment.test"), + ), + }, + }, + }) +} + +func testAccCheckVpnGatewayAttachmentExists(n string, vpc *ec2.Vpc, vgw *ec2.VpnGateway) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + vpcId := rs.Primary.Attributes["vpc_id"] + vgwId := rs.Primary.Attributes["vpn_gateway_id"] + + if len(vgw.VpcAttachments) == 0 { + return fmt.Errorf("VPN Gateway %q has no attachments.", vgwId) + } + + if *vgw.VpcAttachments[0].State != "attached" { + return fmt.Errorf("Expected VPN Gateway %q to be in attached state, but got: %q", + vgwId, *vgw.VpcAttachments[0].State) + } + + if *vgw.VpcAttachments[0].VpcId != *vpc.VpcId { + return fmt.Errorf("Expected VPN Gateway %q to be attached to VPC %q, but got: %q", + vgwId, vpcId, *vgw.VpcAttachments[0].VpcId) + } + + return nil + } +} + +func testAccCheckVpnGatewayAttachmentDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).ec2conn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_vpn_gateway_attachment" { + continue + } + + vgwId := rs.Primary.Attributes["vpn_gateway_id"] + + resp, err := conn.DescribeVpnGateways(&ec2.DescribeVpnGatewaysInput{ + VpnGatewayIds: []*string{aws.String(vgwId)}, + }) + if err != nil { + return err + } + + vgw := resp.VpnGateways[0] + if *vgw.VpcAttachments[0].State != "detached" { + return fmt.Errorf("Expected VPN Gateway %q to be in detached state, but got: %q", + vgwId, *vgw.VpcAttachments[0].State) + } + } + + return nil +} + +const testAccNoVpnGatewayAttachmentConfig = ` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_vpn_gateway" "test" { } +` + +const testAccVpnGatewayAttachmentConfig = ` +resource "aws_vpc" "test" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_vpn_gateway" "test" { } + +resource 
"aws_vpn_gateway_attachment" "test" { + vpc_id = "${aws_vpc.test.id}" + vpn_gateway_id = "${aws_vpn_gateway.test.id}" +} +` diff --git a/builtin/providers/aws/resource_aws_vpn_gateway_test.go b/builtin/providers/aws/resource_aws_vpn_gateway_test.go index 0e3677d6f..c9d2d921a 100644 --- a/builtin/providers/aws/resource_aws_vpn_gateway_test.go +++ b/builtin/providers/aws/resource_aws_vpn_gateway_test.go @@ -16,10 +16,10 @@ func TestAccAWSVpnGateway_basic(t *testing.T) { testNotEqual := func(*terraform.State) error { if len(v.VpcAttachments) == 0 { - return fmt.Errorf("VPN gateway A is not attached") + return fmt.Errorf("VPN Gateway A is not attached") } if len(v2.VpcAttachments) == 0 { - return fmt.Errorf("VPN gateway B is not attached") + return fmt.Errorf("VPN Gateway B is not attached") } id1 := v.VpcAttachments[0].VpcId @@ -58,20 +58,38 @@ func TestAccAWSVpnGateway_basic(t *testing.T) { } func TestAccAWSVpnGateway_reattach(t *testing.T) { - var v ec2.VpnGateway + var vpc1, vpc2 ec2.Vpc + var vgw1, vgw2 ec2.VpnGateway - genTestStateFunc := func(expectedState string) func(*terraform.State) error { + testAttachmentFunc := func(vgw *ec2.VpnGateway, vpc *ec2.Vpc) func(*terraform.State) error { return func(*terraform.State) error { - if len(v.VpcAttachments) == 0 { - if expectedState != "detached" { - return fmt.Errorf("VPN gateway has no VPC attachments") + if len(vgw.VpcAttachments) == 0 { + return fmt.Errorf("VPN Gateway %q has no VPC attachments.", + *vgw.VpnGatewayId) + } + + if len(vgw.VpcAttachments) > 1 { + count := 0 + for _, v := range vgw.VpcAttachments { + if *v.State == "attached" { + count += 1 + } } - } else if len(v.VpcAttachments) == 1 { - if *v.VpcAttachments[0].State != expectedState { - return fmt.Errorf("Expected VPC gateway VPC attachment to be in '%s' state, but was not: %s", expectedState, v) + if count > 1 { + return fmt.Errorf( + "VPN Gateway %q has an unexpected number of VPC attachments (more than 1): %#v", + *vgw.VpnGatewayId, vgw.VpcAttachments) } - } else { - return fmt.Errorf("VPN gateway has unexpected number of VPC attachments(more than 1): %s", v) + } + + if *vgw.VpcAttachments[0].State != "attached" { + return fmt.Errorf("Expected VPN Gateway %q to be attached.", + *vgw.VpnGatewayId) + } + + if *vgw.VpcAttachments[0].VpcId != *vpc.VpcId { + return fmt.Errorf("Expected VPN Gateway %q to be attached to VPC %q, but got: %q", + *vgw.VpnGatewayId, *vpc.VpcId, *vgw.VpcAttachments[0].VpcId) } return nil } @@ -84,27 +102,38 @@ func TestAccAWSVpnGateway_reattach(t *testing.T) { CheckDestroy: testAccCheckVpnGatewayDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccVpnGatewayConfig, + Config: testAccCheckVpnGatewayConfigReattach, Check: resource.ComposeTestCheckFunc( + testAccCheckVpcExists("aws_vpc.foo", &vpc1), + testAccCheckVpcExists("aws_vpc.bar", &vpc2), testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &v), - genTestStateFunc("attached"), + "aws_vpn_gateway.foo", &vgw1), + testAccCheckVpnGatewayExists( + "aws_vpn_gateway.bar", &vgw2), + testAttachmentFunc(&vgw1, &vpc1), + testAttachmentFunc(&vgw2, &vpc2), ), }, resource.TestStep{ - Config: testAccVpnGatewayConfigDetach, + Config: testAccCheckVpnGatewayConfigReattachChange, Check: resource.ComposeTestCheckFunc( testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &v), - genTestStateFunc("detached"), + "aws_vpn_gateway.foo", &vgw1), + testAccCheckVpnGatewayExists( + "aws_vpn_gateway.bar", &vgw2), + testAttachmentFunc(&vgw2, &vpc1), + testAttachmentFunc(&vgw1, &vpc2), ), }, 
resource.TestStep{ - Config: testAccVpnGatewayConfig, + Config: testAccCheckVpnGatewayConfigReattach, Check: resource.ComposeTestCheckFunc( testAccCheckVpnGatewayExists( - "aws_vpn_gateway.foo", &v), - genTestStateFunc("attached"), + "aws_vpn_gateway.foo", &vgw1), + testAccCheckVpnGatewayExists( + "aws_vpn_gateway.bar", &vgw2), + testAttachmentFunc(&vgw1, &vpc1), + testAttachmentFunc(&vgw2, &vpc2), ), }, }, @@ -118,7 +147,7 @@ func TestAccAWSVpnGateway_delete(t *testing.T) { return func(s *terraform.State) error { _, ok := s.RootModule().Resources[r] if ok { - return fmt.Errorf("VPN Gateway %q should have been deleted", r) + return fmt.Errorf("VPN Gateway %q should have been deleted.", r) } return nil } @@ -159,7 +188,6 @@ func TestAccAWSVpnGateway_tags(t *testing.T) { testAccCheckTags(&v.Tags, "foo", "bar"), ), }, - resource.TestStep{ Config: testAccCheckVpnGatewayConfigTagsUpdate, Check: resource.ComposeTestCheckFunc( @@ -198,7 +226,7 @@ func testAccCheckVpnGatewayDestroy(s *terraform.State) error { } if *v.State != "deleted" { - return fmt.Errorf("Expected VpnGateway to be in deleted state, but was not: %s", v) + return fmt.Errorf("Expected VPN Gateway to be in deleted state, but was not: %s", v) } return nil } @@ -235,7 +263,7 @@ func testAccCheckVpnGatewayExists(n string, ig *ec2.VpnGateway) resource.TestChe return err } if len(resp.VpnGateways) == 0 { - return fmt.Errorf("VPNGateway not found") + return fmt.Errorf("VPN Gateway not found") } *ig = *resp.VpnGateways[0] @@ -270,16 +298,6 @@ resource "aws_vpn_gateway" "foo" { } ` -const testAccVpnGatewayConfigDetach = ` -resource "aws_vpc" "foo" { - cidr_block = "10.1.0.0/16" -} - -resource "aws_vpn_gateway" "foo" { - vpc_id = "" -} -` - const testAccCheckVpnGatewayConfigTags = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -305,3 +323,39 @@ resource "aws_vpn_gateway" "foo" { } } ` + +const testAccCheckVpnGatewayConfigReattach = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_vpc" "bar" { + cidr_block = "10.2.0.0/16" +} + +resource "aws_vpn_gateway" "foo" { + vpc_id = "${aws_vpc.foo.id}" +} + +resource "aws_vpn_gateway" "bar" { + vpc_id = "${aws_vpc.bar.id}" +} +` + +const testAccCheckVpnGatewayConfigReattachChange = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_vpc" "bar" { + cidr_block = "10.2.0.0/16" +} + +resource "aws_vpn_gateway" "foo" { + vpc_id = "${aws_vpc.bar.id}" +} + +resource "aws_vpn_gateway" "bar" { + vpc_id = "${aws_vpc.foo.id}" +} +` diff --git a/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown new file mode 100644 index 000000000..809912831 --- /dev/null +++ b/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "aws" +page_title: "AWS: aws_vpn_gateway_attachment" +sidebar_current: "docs-aws-resource-vpn-gateway-attachment" +description: |- + Provides a Virtual Private Gateway attachment resource. +--- + +# aws\_vpn\_gateway\_attachment + +Provides a Virtual Private Gateway attachment resource, allowing for an existing +hardware VPN gateway to be attached and/or detached from a VPC. + +-> **Note:** The [`aws_vpn_gateway`](vpn_gateway.html) +resource can also automatically attach the Virtual Private Gateway it creates +to an existing VPC by setting the [`vpc_id`](vpn_gateway.html#vpc_id) attribute accordingly. 
+ +## Example Usage + +``` +resource "aws_vpc" "network" { + cidr_block = "10.0.0.0/16" +} + +resource "aws_vpn_gateway" "vpn" { + tags { + Name = "example-vpn-gateway" + } +} + +resource "aws_vpn_gateway_attachment" "vpn_attachment" { + vpc_id = "${aws_vpc.network.id}" + vpn_gateway_id = "${aws_vpn_gateway.vpn.id}" +} +``` + +See [Virtual Private Cloud](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Introduction.html) +and [Virtual Private Gateway](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) user +guides for more information. + +## Argument Reference + +The following arguments are supported: + +* `vpc_id` - (Required) The ID of the VPC. +* `vpn_gateway_id` - (Required) The ID of the Virtual Private Gateway. + +## Attributes Reference + +The following attributes are exported: + +* `vpc_id` - The ID of the VPC that Virtual Private Gateway is attached to. +* `vpn_gateway_id` - The ID of the Virtual Private Gateway. + +## Import + +This resource does not support importing. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 8ba2ae80e..4eec00ae6 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -885,6 +885,10 @@ aws_vpn_gateway + > + aws_vpn_gateway_attachment + + From 6aff11e664b10db8f3158de5272dbc635671093a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 09:34:53 +1000 Subject: [PATCH 0558/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c9f3d694d..a26631291 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## 0.7.1 (Unreleased) FEATURES: + * **New Resource:** `aws_vpn_gateway_attachment` [GH-7870] IMPROVEMENTS: * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] From 09f7fb0c34e0581b4ef8615bd3dfd4579bb58827 Mon Sep 17 00:00:00 2001 From: Jared Biel Date: Sun, 7 Aug 2016 02:16:31 -0500 Subject: [PATCH 0559/1238] Fix S3 provider redirect_all_requests_to behavior. #5142 (#7883) The S3 API has two parameters that can be passed to it (HostName and Protocol) for the RedirectAllRequestsTo functionality. HostName is somewhat poorly named because it need not be only a hostname (it can contain a path too.) The terraform code for this was treating the API as the parameter name suggests and was truncating out any paths that were passed. 
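For illustration (a minimal sketch; the bucket name and target URL are placeholders, not taken from the patch): with this fix, a website configuration whose redirect target includes a path keeps that path intact instead of being reduced to the bare host.

```hcl
resource "aws_s3_bucket" "redirect" {
  bucket = "example-redirect-bucket"
  acl    = "public-read"

  website {
    # Previously only "https://example.com" would have been stored;
    # the "/docs" suffix is now preserved in the redirect rule.
    redirect_all_requests_to = "https://example.com/docs"
  }
}
```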
--- .../providers/aws/resource_aws_s3_bucket.go | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 6897f1e7f..29ac708f5 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -495,8 +495,20 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { if v.Protocol == nil { w["redirect_all_requests_to"] = *v.HostName } else { + var host string + var path string + parsedHostName, err := url.Parse(*v.HostName) + if err == nil { + host = parsedHostName.Host + path = parsedHostName.Path + } else { + host = *v.HostName + path = "" + } + w["redirect_all_requests_to"] = (&url.URL{ - Host: *v.HostName, + Host: host, + Path: path, Scheme: *v.Protocol, }).String() } @@ -947,7 +959,12 @@ func resourceAwsS3BucketWebsitePut(s3conn *s3.S3, d *schema.ResourceData, websit if redirectAllRequestsTo != "" { redirect, err := url.Parse(redirectAllRequestsTo) if err == nil && redirect.Scheme != "" { - websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirect.Host), Protocol: aws.String(redirect.Scheme)} + var redirectHostBuf bytes.Buffer + redirectHostBuf.WriteString(redirect.Host) + if redirect.Path != "" { + redirectHostBuf.WriteString(redirect.Path) + } + websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectHostBuf.String()), Protocol: aws.String(redirect.Scheme)} } else { websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{HostName: aws.String(redirectAllRequestsTo)} } From ccb4907eef3130cb41c221d02b6f48c5050ecef2 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 19:17:51 +1200 Subject: [PATCH 0560/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a26631291..20f7a236a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ BUG FIXES: * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` [GH-7995] * provider/aws: Retry association of IAM Role & instance profile [GH-7938] + * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From 68991a531026f4cbb9d57fcbc8a2693b2f5151ad Mon Sep 17 00:00:00 2001 From: stack72 Date: Sun, 7 Aug 2016 19:28:14 +1200 Subject: [PATCH 0561/1238] provider/aws: Add an acceptance test that covers the new behaviour in the `aws_iam_group_membership` resource ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSGroupMembership_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSGroupMembership_ -timeout 120m === RUN TestAccAWSGroupMembership_basic --- PASS: TestAccAWSGroupMembership_basic (74.14s) === RUN TestAccAWSGroupMembership_paginatedUserList --- PASS: TestAccAWSGroupMembership_paginatedUserList (273.29s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 347.447s ``` --- .../resource_aws_iam_group_membership_test.go | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/builtin/providers/aws/resource_aws_iam_group_membership_test.go b/builtin/providers/aws/resource_aws_iam_group_membership_test.go index 9fd65b5a6..5b1930543 100644 --- a/builtin/providers/aws/resource_aws_iam_group_membership_test.go +++ b/builtin/providers/aws/resource_aws_iam_group_membership_test.go @@ -57,6 +57,26 @@ func TestAccAWSGroupMembership_basic(t *testing.T) { }) } +func TestAccAWSGroupMembership_paginatedUserList(t *testing.T) { + var group iam.GetGroupOutput + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSGroupMembershipDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSGroupMemberConfigPaginatedUserList, + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSGroupMembershipExists("aws_iam_group_membership.team", &group), + resource.TestCheckResourceAttr( + "aws_iam_group_membership.team", "users.#", "101"), + ), + }, + }, + }) +} + func testAccCheckAWSGroupMembershipDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).iamconn @@ -202,3 +222,22 @@ resource "aws_iam_group_membership" "team" { group = "${aws_iam_group.group.name}" } ` + +const testAccAWSGroupMemberConfigPaginatedUserList = ` +resource "aws_iam_group" "group" { + name = "test-paginated-group" + path = "/" +} + +resource "aws_iam_group_membership" "team" { + name = "tf-testing-paginated-group-membership" + users = ["${aws_iam_user.user.*.name}"] + group = "${aws_iam_group.group.name}" +} + +resource "aws_iam_user" "user" { + count = 101 + name = "${format("paged-test-user-%d", count.index + 1)}" + path = "/" +} +` From 664626a35259cdd428551ec6606c10c6cd3cc304 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 19:30:42 +1200 Subject: [PATCH 0562/1238] Update CHANGELOG.md --- CHANGELOG.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 20f7a236a..fe8c18d95 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,10 @@ FEATURES: * **New Resource:** `aws_vpn_gateway_attachment` [GH-7870] -IMPROVEMENTS: - * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] +IMPROVEMENTS * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) [GH-7860] + * provider/aws: Query all pages of group membership [GH-6726] + * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 1903208ebc53fcd22c8baffb54121e406952490b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 19:34:37 +1200 Subject: [PATCH 0563/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fe8c18d95..ad06a5bf1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ BUG FIXES: * provider/aws: Handle lack of snapshot ID 
for a volume in `ami_copy` [GH-7995] * provider/aws: Retry association of IAM Role & instance profile [GH-7938] * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] + * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings [GH-7777] * provider/google: Use resource specific project when making queries/changes [GH-7029] ## 0.7.0 (August 2, 2016) From 6edf1b497299953fe2fe9a79850670bcd72da7ce Mon Sep 17 00:00:00 2001 From: Clint Date: Sun, 7 Aug 2016 02:34:58 -0500 Subject: [PATCH 0564/1238] provider/aws: Fix issue updating ElasticBeanstalk Environment Settings (#7777) * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings Fixes the logic that updated settings for Elastic Beanstalk Environments. Because the update is done in the same API call, we need to split removals / additions. Fixes #6890 * add acc test that fails on master --- ...ource_aws_elastic_beanstalk_environment.go | 46 +++++- ..._aws_elastic_beanstalk_environment_test.go | 154 ++++++++++++++++++ 2 files changed, 198 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go index cf1748733..4d98189f9 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment.go @@ -97,7 +97,6 @@ func resourceAwsElasticBeanstalkEnvironment() *schema.Resource { "setting": &schema.Schema{ Type: schema.TypeSet, Optional: true, - Computed: true, Elem: resourceAwsElasticBeanstalkOptionSetting(), Set: optionSettingValueHash, }, @@ -329,7 +328,50 @@ func resourceAwsElasticBeanstalkEnvironmentUpdate(d *schema.ResourceData, meta i os := o.(*schema.Set) ns := n.(*schema.Set) - updateOpts.OptionSettings = extractOptionSettings(ns.Difference(os)) + rm := extractOptionSettings(os.Difference(ns)) + add := extractOptionSettings(ns.Difference(os)) + + // Additions and removals of options are done in a single API call, so we + // can't do our normal "remove these" and then later "add these", re-adding + // any updated settings. + // Because of this, we need to remove any settings in the "removable" + // settings that are also found in the "add" settings, otherwise they + // conflict. Here we loop through all the initial removables from the set + // difference, and we build up a slice of settings not found in the "add" + // set + var remove []*elasticbeanstalk.ConfigurationOptionSetting + if len(add) > 0 { + for _, r := range rm { + for _, a := range add { + // ResourceNames are optional. Some defaults come with it, some do + // not. 
We need to guard against nil/empty in state as well as + // nil/empty from the API + if a.ResourceName != nil { + if r.ResourceName == nil { + continue + } + if *r.ResourceName != *a.ResourceName { + continue + } + } + if *r.Namespace == *a.Namespace && *r.OptionName == *a.OptionName { + continue + } + remove = append(remove, r) + } + } + } else { + remove = rm + } + + for _, elem := range remove { + updateOpts.OptionsToRemove = append(updateOpts.OptionsToRemove, &elasticbeanstalk.OptionSpecification{ + Namespace: elem.Namespace, + OptionName: elem.OptionName, + }) + } + + updateOpts.OptionSettings = add } if d.HasChange("template_name") { diff --git a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go index ab4744351..9f23b0fae 100644 --- a/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go +++ b/builtin/providers/aws/resource_aws_elastic_beanstalk_environment_test.go @@ -3,7 +3,9 @@ package aws import ( "fmt" "log" + "reflect" "regexp" + "sort" "testing" "github.com/aws/aws-sdk-go/aws" @@ -212,6 +214,93 @@ func TestAccAWSBeanstalkEnv_template_change(t *testing.T) { }) } +func TestAccAWSBeanstalkEnv_basic_settings_update(t *testing.T) { + var app elasticbeanstalk.EnvironmentDescription + + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBeanstalkEnvDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBeanstalkEnvConfig_empty_settings(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), + testAccVerifyBeanstalkConfig(&app, []string{}), + ), + }, + resource.TestStep{ + Config: testAccBeanstalkEnvConfig_settings(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), + testAccVerifyBeanstalkConfig(&app, []string{"TF_LOG", "TF_SOME_VAR"}), + ), + }, + resource.TestStep{ + Config: testAccBeanstalkEnvConfig_empty_settings(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckBeanstalkEnvExists("aws_elastic_beanstalk_environment.tfenvtest", &app), + testAccVerifyBeanstalkConfig(&app, []string{}), + ), + }, + }, + }) +} + +func testAccVerifyBeanstalkConfig(env *elasticbeanstalk.EnvironmentDescription, expected []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if env == nil { + return fmt.Errorf("Nil environment in testAccVerifyBeanstalkConfig") + } + conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn + + resp, err := conn.DescribeConfigurationSettings(&elasticbeanstalk.DescribeConfigurationSettingsInput{ + ApplicationName: env.ApplicationName, + EnvironmentName: env.EnvironmentName, + }) + + if err != nil { + return fmt.Errorf("Error describing config settings in testAccVerifyBeanstalkConfig: %s", err) + } + + // should only be 1 environment + if len(resp.ConfigurationSettings) != 1 { + return fmt.Errorf("Expected only 1 set of Configuration Settings in testAccVerifyBeanstalkConfig, got (%d)", len(resp.ConfigurationSettings)) + } + + cs := resp.ConfigurationSettings[0] + + var foundEnvs []string + testStrings := []string{"TF_LOG", "TF_SOME_VAR"} + for _, os := range cs.OptionSettings { + for _, k := range testStrings { + if *os.OptionName == k { + foundEnvs = append(foundEnvs, k) + } + } + } + + // if expected is zero, then we should 
not have found any of the predefined + // env vars + if len(expected) == 0 { + if len(foundEnvs) > 0 { + return fmt.Errorf("Found configs we should not have: %#v", foundEnvs) + } + return nil + } + + sort.Strings(testStrings) + sort.Strings(expected) + if !reflect.DeepEqual(testStrings, expected) { + return fmt.Errorf("Error matching strings, expected:\n\n%#v\n\ngot:\n\n%#v\n", testStrings, foundEnvs) + } + + return nil + } +} + func testAccCheckBeanstalkEnvDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elasticbeanstalkconn @@ -376,6 +465,71 @@ resource "aws_elastic_beanstalk_environment" "tfenvtest" { } ` +func testAccBeanstalkEnvConfig_empty_settings(r int) string { + return fmt.Sprintf(` +resource "aws_elastic_beanstalk_application" "tftest" { + name = "tf-test-name-%d" + description = "tf-test-desc" +} + +resource "aws_elastic_beanstalk_environment" "tfenvtest" { + name = "tf-test-name-%d" + application = "${aws_elastic_beanstalk_application.tftest.name}" + solution_stack_name = "64bit Amazon Linux running Python" + + wait_for_ready_timeout = "15m" +}`, r, r) +} + +func testAccBeanstalkEnvConfig_settings(r int) string { + return fmt.Sprintf(` +resource "aws_elastic_beanstalk_application" "tftest" { + name = "tf-test-name-%d" + description = "tf-test-desc" +} + +resource "aws_elastic_beanstalk_environment" "tfenvtest" { + name = "tf-test-name-%d" + application = "${aws_elastic_beanstalk_application.tftest.name}" + solution_stack_name = "64bit Amazon Linux running Python" + + wait_for_ready_timeout = "15m" + + setting { + namespace = "aws:elasticbeanstalk:application:environment" + name = "TF_LOG" + value = "true" + } + + setting { + namespace = "aws:elasticbeanstalk:application:environment" + name = "TF_SOME_VAR" + value = "true" + } + + setting { + namespace = "aws:autoscaling:scheduledaction" + resource = "ScheduledAction01" + name = "MinSize" + value = 2 + } + + setting { + namespace = "aws:autoscaling:scheduledaction" + resource = "ScheduledAction01" + name = "MaxSize" + value = 3 + } + + setting { + namespace = "aws:autoscaling:scheduledaction" + resource = "ScheduledAction01" + name = "StartTime" + value = "2016-07-28T04:07:02Z" + } +}`, r, r) +} + const testAccBeanstalkWorkerEnvConfig = ` resource "aws_iam_instance_profile" "tftest" { name = "tftest_profile" From 5c0662f0eb7c1b2ac2bae0d81cc20a485589a79b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 17:36:00 +1000 Subject: [PATCH 0565/1238] provider/aws: Change the way ARNs are built (#7151) ARNs used to be build using the iamconn.GetUser func call. 
This wouldn't work on some scenarios and was changed so that we can expose the AccountId and Region via meta This commit just changes the build ARN funcs to use this new way of doing things --- .../providers/aws/resource_aws_db_instance.go | 20 +++++++----------- .../aws/resource_aws_db_instance_test.go | 4 ++-- .../aws/resource_aws_db_parameter_group.go | 20 +++++++----------- .../aws/resource_aws_db_security_group.go | 21 +++++++------------ .../aws/resource_aws_db_subnet_group.go | 20 +++++++----------- .../aws/resource_aws_elasticache_cluster.go | 20 +++++++----------- .../aws/resource_aws_rds_cluster_instance.go | 4 ++-- ...esource_aws_rds_cluster_parameter_group.go | 21 +++++++------------ 8 files changed, 46 insertions(+), 84 deletions(-) diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index c7615161b..4fe66a2b4 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform/helper/resource" @@ -693,7 +692,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { // list tags for resource // set tags conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSARN(d.Id(), meta) + arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { name := "" if v.DBName != nil && *v.DBName != "" { @@ -975,7 +974,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error } } - if arn, err := buildRDSARN(d.Id(), meta); err == nil { + if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { @@ -1051,16 +1050,11 @@ func resourceAwsDbInstanceStateRefreshFunc( } } -func buildRDSARN(identifier string, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildRDSARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:rds:%s:%s:db:%s", region, accountID, identifier) + arn := fmt.Sprintf("arn:aws:rds:%s:%s:db:%s", region, accountid, identifier) return arn, nil + } diff --git a/builtin/providers/aws/resource_aws_db_instance_test.go b/builtin/providers/aws/resource_aws_db_instance_test.go index 509f875e3..11bfd75b2 100644 --- a/builtin/providers/aws/resource_aws_db_instance_test.go +++ b/builtin/providers/aws/resource_aws_db_instance_test.go @@ -347,9 +347,9 @@ func testAccCheckAWSDBInstanceSnapshot(s *terraform.State) error { if newerr.Code() == "DBSnapshotNotFound" { return fmt.Errorf("Snapshot %s not found", snapshot_identifier) } - } else { // snapshot was found + } else { // snapshot was found, // verify we have the tags copied to the snapshot - instanceARN, err := buildRDSARN(snapshot_identifier, testAccProvider.Meta()) + instanceARN, err := buildRDSARN(snapshot_identifier, testAccProvider.Meta().(*AWSClient).accountid, 
testAccProvider.Meta().(*AWSClient).region) // tags have a different ARN, just swapping :db: for :snapshot: tagsARN := strings.Replace(instanceARN, ":db:", ":snapshot:", 1) if err != nil { diff --git a/builtin/providers/aws/resource_aws_db_parameter_group.go b/builtin/providers/aws/resource_aws_db_parameter_group.go index 67ada3048..cd35fc36a 100644 --- a/builtin/providers/aws/resource_aws_db_parameter_group.go +++ b/builtin/providers/aws/resource_aws_db_parameter_group.go @@ -13,7 +13,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" ) @@ -150,7 +149,7 @@ func resourceAwsDbParameterGroupRead(d *schema.ResourceData, meta interface{}) e d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) paramGroup := describeResp.DBParameterGroups[0] - arn, err := buildRDSPGARN(d, meta) + arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { name := "" if paramGroup.DBParameterGroupName != nil && *paramGroup.DBParameterGroupName != "" { @@ -226,7 +225,7 @@ func resourceAwsDbParameterGroupUpdate(d *schema.ResourceData, meta interface{}) } } - if arn, err := buildRDSPGARN(d, meta); err == nil { + if arn, err := buildRDSPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(rdsconn, d, arn); err != nil { return err } else { @@ -287,16 +286,11 @@ func resourceAwsDbParameterHash(v interface{}) int { return hashcode.String(buf.String()) } -func buildRDSPGARN(d *schema.ResourceData, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildRDSPGARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountID, d.Id()) + arn := fmt.Sprintf("arn:aws:rds:%s:%s:pg:%s", region, accountid, identifier) return arn, nil + } diff --git a/builtin/providers/aws/resource_aws_db_security_group.go b/builtin/providers/aws/resource_aws_db_security_group.go index 3cb9693ef..972cfd8b3 100644 --- a/builtin/providers/aws/resource_aws_db_security_group.go +++ b/builtin/providers/aws/resource_aws_db_security_group.go @@ -4,12 +4,10 @@ import ( "bytes" "fmt" "log" - "strings" "time" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform/helper/hashcode" @@ -173,7 +171,7 @@ func resourceAwsDbSecurityGroupRead(d *schema.ResourceData, meta interface{}) er d.Set("ingress", rules) conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSSecurityGroupARN(d, meta) + arn, err := buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { name := "" if sg.DBSecurityGroupName != nil && *sg.DBSecurityGroupName != "" { @@ -204,7 +202,7 @@ func resourceAwsDbSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) conn := meta.(*AWSClient).rdsconn d.Partial(true) - if arn, err := buildRDSSecurityGroupARN(d, meta); err == nil { + if arn, err 
:= buildRDSSecurityGroupARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { @@ -418,16 +416,11 @@ func resourceAwsDbSecurityGroupStateRefreshFunc( } } -func buildRDSSecurityGroupARN(d *schema.ResourceData, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildRDSSecurityGroupARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:rds:%s:%s:secgrp:%s", region, accountID, d.Id()) + arn := fmt.Sprintf("arn:aws:rds:%s:%s:secgrp:%s", region, accountid, identifier) return arn, nil + } diff --git a/builtin/providers/aws/resource_aws_db_subnet_group.go b/builtin/providers/aws/resource_aws_db_subnet_group.go index 5d1ae1191..24852a75f 100644 --- a/builtin/providers/aws/resource_aws_db_subnet_group.go +++ b/builtin/providers/aws/resource_aws_db_subnet_group.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -131,7 +130,7 @@ func resourceAwsDbSubnetGroupRead(d *schema.ResourceData, meta interface{}) erro // list tags for resource // set tags conn := meta.(*AWSClient).rdsconn - arn, err := buildRDSsubgrpARN(d, meta) + arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { log.Printf("[DEBUG] Error building ARN for DB Subnet Group, not setting Tags for group %s", *subnetGroup.DBSubnetGroupName) } else { @@ -179,7 +178,7 @@ func resourceAwsDbSubnetGroupUpdate(d *schema.ResourceData, meta interface{}) er } } - if arn, err := buildRDSsubgrpARN(d, meta); err == nil { + if arn, err := buildRDSsubgrpARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } else { @@ -228,18 +227,13 @@ func resourceAwsDbSubnetGroupDeleteRefreshFunc( } } -func buildRDSsubgrpARN(d *schema.ResourceData, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildRDSsubgrpARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountID, d.Id()) + arn := fmt.Sprintf("arn:aws:rds:%s:%s:subgrp:%s", region, accountid, identifier) return arn, nil + } func validateSubnetGroupName(v interface{}, k string) (ws []string, errors []error) { diff --git a/builtin/providers/aws/resource_aws_elasticache_cluster.go b/builtin/providers/aws/resource_aws_elasticache_cluster.go index 7ff086a26..cd79a2b1c 100644 --- a/builtin/providers/aws/resource_aws_elasticache_cluster.go +++ 
b/builtin/providers/aws/resource_aws_elasticache_cluster.go @@ -10,7 +10,6 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/service/elasticache" - "github.com/aws/aws-sdk-go/service/iam" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" ) @@ -347,7 +346,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) } // list tags for resource // set tags - arn, err := buildECARN(d, meta) + arn, err := buildECARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not setting Tags for cluster %s", *c.CacheClusterId) } else { @@ -372,7 +371,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).elasticacheconn - arn, err := buildECARN(d, meta) + arn, err := buildECARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { log.Printf("[DEBUG] Error building ARN for ElastiCache Cluster, not updating Tags for cluster %s", d.Id()) } else { @@ -619,16 +618,11 @@ func cacheClusterStateRefreshFunc(conn *elasticache.ElastiCache, clusterID, give } } -func buildECARN(d *schema.ResourceData, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildECARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct ElastiCache ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:elasticache:%s:%s:cluster:%s", region, accountID, d.Id()) + arn := fmt.Sprintf("arn:aws:elasticache:%s:%s:cluster:%s", region, accountid, identifier) return arn, nil + } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 745674c43..0ea5f13a2 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -212,7 +212,7 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) } // Fetch and save tags - arn, err := buildRDSARN(d.Id(), meta) + arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { log.Printf("[DEBUG] Error building ARN for RDS Cluster Instance (%s), not setting Tags", *db.DBInstanceIdentifier) } else { @@ -271,7 +271,7 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ } - if arn, err := buildRDSARN(d.Id(), meta); err == nil { + if arn, err := buildRDSARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(conn, d, arn); err != nil { return err } diff --git a/builtin/providers/aws/resource_aws_rds_cluster_parameter_group.go b/builtin/providers/aws/resource_aws_rds_cluster_parameter_group.go index 5c9d7e407..31b40fd02 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_parameter_group.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_parameter_group.go @@ -3,12 +3,10 @@ package aws import ( "fmt" "log" - "strings" "time" 
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/rds" "github.com/hashicorp/terraform/helper/resource" @@ -148,7 +146,7 @@ func resourceAwsRDSClusterParameterGroupRead(d *schema.ResourceData, meta interf d.Set("parameter", flattenParameters(describeParametersResp.Parameters)) paramGroup := describeResp.DBClusterParameterGroups[0] - arn, err := buildRDSCPGARN(d, meta) + arn, err := buildRDSCPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region) if err != nil { name := "" if paramGroup.DBClusterParameterGroupName != nil && *paramGroup.DBClusterParameterGroupName != "" { @@ -213,7 +211,7 @@ func resourceAwsRDSClusterParameterGroupUpdate(d *schema.ResourceData, meta inte d.SetPartial("parameter") } - if arn, err := buildRDSCPGARN(d, meta); err == nil { + if arn, err := buildRDSCPGARN(d.Id(), meta.(*AWSClient).accountid, meta.(*AWSClient).region); err == nil { if err := setTagsRDS(rdsconn, d, arn); err != nil { return err } else { @@ -264,16 +262,11 @@ func resourceAwsRDSClusterParameterGroupDeleteRefreshFunc( } } -func buildRDSCPGARN(d *schema.ResourceData, meta interface{}) (string, error) { - iamconn := meta.(*AWSClient).iamconn - region := meta.(*AWSClient).region - // An zero value GetUserInput{} defers to the currently logged in user - resp, err := iamconn.GetUser(&iam.GetUserInput{}) - if err != nil { - return "", err +func buildRDSCPGARN(identifier, accountid, region string) (string, error) { + if accountid == "" { + return "", fmt.Errorf("Unable to construct RDS Cluster ARN because of missing AWS Account ID") } - userARN := *resp.User.Arn - accountID := strings.Split(userARN, ":")[4] - arn := fmt.Sprintf("arn:aws:rds:%s:%s:cluster-pg:%s", region, accountID, d.Id()) + arn := fmt.Sprintf("arn:aws:rds:%s:%s:cluster-pg:%s", region, accountid, identifier) return arn, nil + } From a56a1450d43bac55a163b2315cee264791dc5a28 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 7 Aug 2016 19:36:20 +1200 Subject: [PATCH 0566/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ad06a5bf1..606bb047b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ FEATURES: IMPROVEMENTS * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) [GH-7860] * provider/aws: Query all pages of group membership [GH-6726] + * provider/aws: Change the way ARNs are built [GH-7151] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: From 43ac64e2b7f233a27ee17a50359bfd0849860420 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 8 Aug 2016 12:08:37 +1200 Subject: [PATCH 0567/1238] provider/digitalocean: Acceptance Tests needed a new Image Name --- .../resource_digitalocean_droplet_test.go | 16 ++++++++-------- .../resource_digitalocean_floating_ip_test.go | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index 23485cfd6..d6f2b190d 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -29,7 +29,7 @@ func TestAccDigitalOceanDroplet_Basic(t *testing.T) { resource.TestCheckResourceAttr( "digitalocean_droplet.foobar", "size", "512mb"), resource.TestCheckResourceAttr( - 
"digitalocean_droplet.foobar", "image", "centos-5-8-x32"), + "digitalocean_droplet.foobar", "image", "centos-7-x64"), resource.TestCheckResourceAttr( "digitalocean_droplet.foobar", "region", "nyc3"), resource.TestCheckResourceAttr( @@ -191,7 +191,7 @@ func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error { func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.Image.Slug != "centos-5-8-x32" { + if droplet.Image.Slug != "centos-7-x64" { return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) } @@ -228,7 +228,7 @@ func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *godo.Droplet) res func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *godo.Droplet) resource.TestCheckFunc { return func(s *terraform.State) error { - if droplet.Image.Slug != "centos-5-8-x32" { + if droplet.Image.Slug != "centos-7-x64" { return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug) } @@ -336,7 +336,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "foo" size = "512mb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "nyc3" user_data = "foobar" ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] @@ -356,7 +356,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "foo" size = "512mb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "nyc3" user_data = "foobar" ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] @@ -373,7 +373,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "foo" size = "512mb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "nyc3" user_data = "foobar foobar" ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] @@ -389,7 +389,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "baz" size = "1gb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "nyc3" ssh_keys = ["${digitalocean_ssh_key.foobar.id}"] } @@ -405,7 +405,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "baz" size = "1gb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "sgp1" ipv6 = true private_networking = true diff --git a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go b/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go index 5b57aa8dc..6cb3ba417 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_floating_ip_test.go @@ -113,7 +113,7 @@ resource "digitalocean_ssh_key" "foobar" { resource "digitalocean_droplet" "foobar" { name = "baz" size = "1gb" - image = "centos-5-8-x32" + image = "centos-7-x64" region = "nyc3" ipv6 = true private_networking = true From 5ac8ae1338f1750ce661b52da5c84b6683223fec Mon Sep 17 00:00:00 2001 From: Andy Chan Date: Sun, 7 Aug 2016 17:21:18 -0700 Subject: [PATCH 0568/1238] Adding firehose to elastic search support (#7839) Add firehose elasticsearch configuration documentation Adding CRUD for elastic search as firehose destination Updated the firehose stream documentation to add elastic search as destination example. 
Adding testing for es as firehose destination Update the test case for es --- ...ce_aws_kinesis_firehose_delivery_stream.go | 235 ++++++++++++++++-- ...s_kinesis_firehose_delivery_stream_test.go | 122 ++++++++- ...sis_firehose_delivery_stream.html.markdown | 41 ++- 3 files changed, 370 insertions(+), 28 deletions(-) diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go index e645a0c13..9db58e52b 100644 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -27,6 +27,14 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 64 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 64 characters", k)) + } + return + }, }, "destination": &schema.Schema{ @@ -37,6 +45,14 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { value := v.(string) return strings.ToLower(value) }, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "s3" && value != "redshift" && value != "elasticsearch" { + errors = append(errors, fmt.Errorf( + "%q must be one of 's3', 'redshift', 'elasticsearch'", k)) + } + return + }, }, // elements removed in v0.7.0 @@ -167,6 +183,113 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { }, }, + "elasticsearch_configuration": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "buffering_interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 60 || value > 900 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 60 to 900 seconds.", k)) + } + return + }, + }, + + "buffering_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 5, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 1 || value > 100 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 1 to 100 MB.", k)) + } + return + }, + }, + + "domain_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "index_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "index_rotation_period": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "OneDay", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "NoRotation" && value != "OneHour" && value != "OneDay" && value != "OneWeek" && value != "OneMonth" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'NoRotation', 'OneHour', 'OneDay', 'OneWeek', 'OneMonth'", k)) + } + return + }, + }, + + "retry_duration": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 300, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 0 || value > 7200 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 0 to 7200 seconds.", k)) + } + return + }, + }, + + "role_arn": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + 
"s3_backup_mode": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "FailedDocumentsOnly", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "FailedDocumentsOnly" && value != "AllDocuments" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'FailedDocumentsOnly', 'AllDocuments'", k)) + } + return + }, + }, + + "type_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if len(value) > 100 { + errors = append(errors, fmt.Errorf( + "%q cannot be longer than 100 characters", k)) + } + return + }, + }, + }, + }, + }, + "arn": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -188,15 +311,6 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { } } -func validateConfiguration(d *schema.ResourceData) error { - destination := d.Get("destination").(string) - if destination != "s3" && destination != "redshift" { - return fmt.Errorf("[ERROR] Destination must be s3 or redshift") - } - - return nil -} - func createS3Config(d *schema.ResourceData) *firehose.S3DestinationConfiguration { s3 := d.Get("s3_configuration").([]interface{})[0].(map[string]interface{}) @@ -289,6 +403,85 @@ func updateRedshiftConfig(d *schema.ResourceData, s3Update *firehose.S3Destinati }, nil } +func createElasticsearchConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.ElasticsearchDestinationConfiguration, error) { + esConfig, ok := d.GetOk("elasticsearch_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") + } + esList := esConfig.([]interface{}) + + es := esList[0].(map[string]interface{}) + + config := &firehose.ElasticsearchDestinationConfiguration{ + BufferingHints: extractBufferingHints(es), + DomainARN: aws.String(es["domain_arn"].(string)), + IndexName: aws.String(es["index_name"].(string)), + RetryOptions: extractRetryOptions(es), + RoleARN: aws.String(es["role_arn"].(string)), + TypeName: aws.String(es["type_name"].(string)), + S3Configuration: s3Config, + } + + if indexRotationPeriod, ok := es["index_rotation_period"]; ok { + config.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + } + if s3BackupMode, ok := es["s3_backup_mode"]; ok { + config.S3BackupMode = aws.String(s3BackupMode.(string)) + } + + return config, nil +} + +func updateElasticsearchConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.ElasticsearchDestinationUpdate, error) { + esConfig, ok := d.GetOk("elasticsearch_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Elasticsearch Configuration for Kinesis Firehose: elasticsearch_configuration not found") + } + esList := esConfig.([]interface{}) + + es := esList[0].(map[string]interface{}) + + update := &firehose.ElasticsearchDestinationUpdate{ + BufferingHints: extractBufferingHints(es), + DomainARN: aws.String(es["domain_arn"].(string)), + IndexName: aws.String(es["index_name"].(string)), + RetryOptions: extractRetryOptions(es), + RoleARN: aws.String(es["role_arn"].(string)), + TypeName: aws.String(es["type_name"].(string)), + S3Update: s3Update, + } + + if indexRotationPeriod, ok := es["index_rotation_period"]; ok { + update.IndexRotationPeriod = aws.String(indexRotationPeriod.(string)) + } + + return update, nil +} + +func extractBufferingHints(es 
map[string]interface{}) *firehose.ElasticsearchBufferingHints { + bufferingHints := &firehose.ElasticsearchBufferingHints{} + + if bufferingInterval, ok := es["buffering_hints"].(int); ok { + bufferingHints.IntervalInSeconds = aws.Int64(int64(bufferingInterval)) + } + if bufferingSize, ok := es["buffering_size"].(int); ok { + bufferingHints.SizeInMBs = aws.Int64(int64(bufferingSize)) + } + + return bufferingHints + +} + +func extractRetryOptions(es map[string]interface{}) *firehose.ElasticsearchRetryOptions { + retryOptions := &firehose.ElasticsearchRetryOptions{} + + if retryDuration, ok := es["retry_duration"].(int); ok { + retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + } + + return retryOptions +} + func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose.CopyCommand { cmd := &firehose.CopyCommand{ DataTableName: aws.String(redshift["data_table_name"].(string)), @@ -306,10 +499,6 @@ func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose. func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).firehoseconn - if err := validateConfiguration(d); err != nil { - return err - } - sn := d.Get("name").(string) s3Config := createS3Config(d) @@ -319,6 +508,12 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta if d.Get("destination").(string) == "s3" { createInput.S3DestinationConfiguration = s3Config + } else if d.Get("destination").(string) == "elasticsearch" { + esConfig, err := createElasticsearchConfig(d, s3Config) + if err != nil { + return err + } + createInput.ElasticsearchDestinationConfiguration = esConfig } else { rc, err := createRedshiftConfig(d, s3Config) if err != nil { @@ -359,7 +554,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta Pending: []string{"CREATING"}, Target: []string{"ACTIVE"}, Refresh: firehoseStreamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, + Timeout: 20 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } @@ -381,10 +576,6 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta interface{}) error { conn := meta.(*AWSClient).firehoseconn - if err := validateConfiguration(d); err != nil { - return err - } - sn := d.Get("name").(string) s3Config := updateS3Config(d) @@ -396,6 +587,12 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta if d.Get("destination").(string) == "s3" { updateInput.S3DestinationUpdate = s3Config + } else if d.Get("destination").(string) == "elasticsearch" { + esUpdate, err := updateElasticsearchConfig(d, s3Config) + if err != nil { + return err + } + updateInput.ElasticsearchDestinationUpdate = esUpdate } else { rc, err := updateRedshiftConfig(d, s3Config) if err != nil { @@ -459,7 +656,7 @@ func resourceAwsKinesisFirehoseDeliveryStreamDelete(d *schema.ResourceData, meta Pending: []string{"DELETING"}, Target: []string{"DESTROYED"}, Refresh: firehoseStreamStateRefreshFunc(conn, sn), - Timeout: 5 * time.Minute, + Timeout: 20 * time.Minute, Delay: 10 * time.Second, MinTimeout: 3 * time.Second, } diff --git a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go index 07bab51bf..b2c95dce8 100644 --- a/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go 
+++ b/builtin/providers/aws/resource_aws_kinesis_firehose_delivery_stream_test.go @@ -29,7 +29,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), ), }, }, @@ -61,7 +61,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), ), }, @@ -69,7 +69,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil), ), }, }, @@ -100,7 +100,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates(t *testing.T) Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), ), }, @@ -108,7 +108,46 @@ func TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates(t *testing.T) Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, updatedRedshiftConfig), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, updatedRedshiftConfig, nil), + ), + }, + }, + }) +} + +func TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates(t *testing.T) { + var stream firehose.DeliveryStreamDescription + + ri := acctest.RandInt() + awsAccountId := os.Getenv("AWS_ACCOUNT_ID") + preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchBasic, + ri, awsAccountId, ri, ri, ri, awsAccountId, awsAccountId, ri, ri) + postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_ElasticsearchUpdate, + ri, awsAccountId, ri, ri, ri, awsAccountId, awsAccountId, ri, ri) + + updatedElasticSearchConfig := &firehose.ElasticsearchDestinationDescription{ + BufferingHints: &firehose.ElasticsearchBufferingHints{ + IntervalInSeconds: aws.Int64(500), + }, + } + + resource.Test(t, resource.TestCase{ + PreCheck: testAccKinesisFirehosePreCheck(t), + Providers: testAccProviders, + CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil), + ), + }, + 
resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, updatedElasticSearchConfig), ), }, }, @@ -142,9 +181,7 @@ func testAccCheckKinesisFirehoseDeliveryStreamExists(n string, stream *firehose. } } -func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, redshiftConfig interface{}) resource.TestCheckFunc { - // *firehose.RedshiftDestinationDescription - // *firehose.S3DestinationDescription +func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}) resource.TestCheckFunc { return func(s *terraform.State) error { if !strings.HasPrefix(*stream.DeliveryStreamName, "terraform-kinesis-firehose") { return fmt.Errorf("Bad Stream name: %s", *stream.DeliveryStreamName) @@ -193,6 +230,19 @@ func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.Del } } + if elasticsearchConfig != nil { + es := elasticsearchConfig.(*firehose.ElasticsearchDestinationDescription) + // Range over the Stream Destinations, looking for the matching Elasticsearch destination + var match bool + for _, d := range stream.Destinations { + if d.ElasticsearchDestinationDescription != nil { + match = true + } + } + if !match { + return fmt.Errorf("Mismatch Elasticsearch Buffering Interval, expected: %s, got: %s", es, stream.Destinations) + } + } } return nil } @@ -365,3 +415,59 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { data_table_columns = "test-col" } }` + +var testAccKinesisFirehoseDeliveryStreamBaseElasticsearchConfig = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` +resource "aws_elasticsearch_domain" "test_cluster" { + domain_name = "es-test-%d" + + access_policies = < **NOTE:** Kinesis Firehose is currently only supported in us-east-1, us-west-2 and eu-west-1. ## Argument Reference @@ -92,7 +119,7 @@ The following arguments are supported: * `name` - (Required) A name to identify the stream. This is unique to the AWS account and region the Stream is created in. -* `destination` – (Required) This is the destination to where the data is delivered. The only options are `s3` & `redshift`. +* `destination` – (Required) This is the destination to where the data is delivered. The only options are `s3`, `redshift`, and `elasticsearch`. * `s3_configuration` - (Required) Configuration options for the s3 destination (or the intermediate bucket if the destination is redshift). More details are given below. * `redshift_configuration` - (Optional) Configuration options if redshift is the destination. @@ -121,6 +148,18 @@ The `redshift_configuration` object supports the following: * `copy_options` - (Optional) Copy options for copying the data from the s3 intermediate bucket into redshift. * `data_table_columns` - (Optional) The data table columns that will be targeted by the copy command. +The `elasticsearch_configuration` object supports the following: + +* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 60 to 900, before deliverying it to the destination. The default value is 300s. 
+* `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB. +* `domain_arn` - (Required) The ARN of the Amazon ES domain. The IAM role must have permission for `DescribeElasticsearchDomain`, `DescribeElasticsearchDomains`, and `DescribeElasticsearchDomainConfig` after assuming `RoleARN`. The pattern needs to be `arn:.*`. +* `index_name` - (Required) The Elasticsearch index name. +* `index_rotation_period` - (Optional) The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are `NoRotation`, `OneHour`, `OneDay`, `OneWeek`, and `OneMonth`. The default value is `OneDay`. +* `retry_duration` - (Optional) After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are writtn to Amazon S3. The default value is 300s. There will be no retry if the value is 0. +* `role_arn` - (Required) The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The pattern needs to be `arn:.*`. +* `s3_backup_mode` - (Optional) Defines how documents should be delivered to Aamazon S3. Valid values are `FailedDocumentsOnly` and `AllDocuments`. Default value is `FailedDocumentsOnly`. +* `type_name` - (Required) The Elasticsearch type name with maximum length of 100 characters. + ## Attributes Reference * `arn` - The Amazon Resource Name (ARN) specifying the Stream From f1e5505d436f1ec63b122ff5d9480dd3b0ad3d3b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 12:22:23 +1200 Subject: [PATCH 0569/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 606bb047b..c0dfc42f3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ IMPROVEMENTS * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) [GH-7860] * provider/aws: Query all pages of group membership [GH-6726] * provider/aws: Change the way ARNs are built [GH-7151] + * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: From 4968f6a5ffd4380dcb34ea21ae133a8b2fdbae29 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 17:28:43 -0700 Subject: [PATCH 0570/1238] providers/google: Fix read for the backend service resource (#7476) --- .../resource_compute_backend_service.go | 21 ++++---- .../resource_compute_backend_service_test.go | 50 +++++++++++++++++-- 2 files changed, 56 insertions(+), 15 deletions(-) diff --git a/builtin/providers/google/resource_compute_backend_service.go b/builtin/providers/google/resource_compute_backend_service.go index dcc3410e5..08eb432f0 100644 --- a/builtin/providers/google/resource_compute_backend_service.go +++ b/builtin/providers/google/resource_compute_backend_service.go @@ -255,20 +255,21 @@ func resourceComputeBackendServiceUpdate(d *schema.ResourceData, meta interface{ HealthChecks: healthChecks, } - if d.HasChange("backend") { - service.Backends = expandBackends(d.Get("backend").(*schema.Set).List()) + // Optional things + if v, ok := d.GetOk("backend"); ok { + service.Backends = 
expandBackends(v.(*schema.Set).List()) } - if d.HasChange("description") { - service.Description = d.Get("description").(string) + if v, ok := d.GetOk("description"); ok { + service.Description = v.(string) } - if d.HasChange("port_name") { - service.PortName = d.Get("port_name").(string) + if v, ok := d.GetOk("port_name"); ok { + service.PortName = v.(string) } - if d.HasChange("protocol") { - service.Protocol = d.Get("protocol").(string) + if v, ok := d.GetOk("protocol"); ok { + service.Protocol = v.(string) } - if d.HasChange("timeout_sec") { - service.TimeoutSec = int64(d.Get("timeout_sec").(int)) + if v, ok := d.GetOk("timeout_sec"); ok { + service.TimeoutSec = int64(v.(int)) } if d.HasChange("enable_cdn") { diff --git a/builtin/providers/google/resource_compute_backend_service_test.go b/builtin/providers/google/resource_compute_backend_service_test.go index 01b0d3d38..41be583c1 100644 --- a/builtin/providers/google/resource_compute_backend_service_test.go +++ b/builtin/providers/google/resource_compute_backend_service_test.go @@ -46,7 +46,6 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) var svc compute.BackendService - resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -54,7 +53,7 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { Steps: []resource.TestStep{ resource.TestStep{ Config: testAccComputeBackendService_withBackend( - serviceName, igName, itName, checkName), + serviceName, igName, itName, checkName, 10), Check: resource.ComposeTestCheckFunc( testAccCheckComputeBackendServiceExists( "google_compute_backend_service.lipsum", &svc), @@ -74,6 +73,47 @@ func TestAccComputeBackendService_withBackend(t *testing.T) { } } +func TestAccComputeBackendService_withBackendAndUpdate(t *testing.T) { + serviceName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + igName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + itName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + checkName := fmt.Sprintf("tf-test-%s", acctest.RandString(10)) + var svc compute.BackendService + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeBackendServiceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 10), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.lipsum", &svc), + ), + }, + resource.TestStep{ + Config: testAccComputeBackendService_withBackend( + serviceName, igName, itName, checkName, 20), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeBackendServiceExists( + "google_compute_backend_service.lipsum", &svc), + ), + }, + }, + }) + + if svc.TimeoutSec != 20 { + t.Errorf("Expected TimeoutSec == 20, got %d", svc.TimeoutSec) + } + if svc.Protocol != "HTTP" { + t.Errorf("Expected Protocol to be HTTP, got %q", svc.Protocol) + } + if len(svc.Backends) != 1 { + t.Errorf("Expected 1 backend, got %d", len(svc.Backends)) + } +} + func testAccCheckComputeBackendServiceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -204,14 +244,14 @@ resource "google_compute_http_health_check" "one" { } func testAccComputeBackendService_withBackend( - serviceName, igName, itName, checkName string) 
string { + serviceName, igName, itName, checkName string, timeout int64) string { return fmt.Sprintf(` resource "google_compute_backend_service" "lipsum" { name = "%s" description = "Hello World 1234" port_name = "http" protocol = "HTTP" - timeout_sec = 10 + timeout_sec = %v backend { group = "${google_compute_instance_group_manager.foobar.instance_group}" @@ -249,5 +289,5 @@ resource "google_compute_http_health_check" "default" { check_interval_sec = 1 timeout_sec = 1 } -`, serviceName, igName, itName, checkName) +`, serviceName, timeout, igName, itName, checkName) } From ccb9c24ee324c50bd040459ad87240097bc5757a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 12:29:19 +1200 Subject: [PATCH 0571/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0dfc42f3..4d280bc3b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BUG FIXES: * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings [GH-7777] * provider/google: Use resource specific project when making queries/changes [GH-7029] + * provider/google: Fix read for the backend service resource [GH-7476] ## 0.7.0 (August 2, 2016) From 9ddb2fd6f82e8ffb1e08d29a8702d7819792f65a Mon Sep 17 00:00:00 2001 From: bill fumerola Date: Sun, 7 Aug 2016 17:36:27 -0700 Subject: [PATCH 0572/1238] provider/google: atomic Cloud DNS record changes (#6575) Closes #6129 --- builtin/providers/google/dns_change.go | 9 +- .../google/resource_dns_record_set.go | 105 +++++++++++++----- .../google/resource_dns_record_set_test.go | 95 ++++++++++++++-- 3 files changed, 168 insertions(+), 41 deletions(-) diff --git a/builtin/providers/google/dns_change.go b/builtin/providers/google/dns_change.go index 38a34135e..f2f827a3b 100644 --- a/builtin/providers/google/dns_change.go +++ b/builtin/providers/google/dns_change.go @@ -1,6 +1,8 @@ package google import ( + "time" + "google.golang.org/api/dns/v1" "github.com/hashicorp/terraform/helper/resource" @@ -30,9 +32,14 @@ func (w *DnsChangeWaiter) RefreshFunc() resource.StateRefreshFunc { } func (w *DnsChangeWaiter) Conf() *resource.StateChangeConf { - return &resource.StateChangeConf{ + state := &resource.StateChangeConf{ Pending: []string{"pending"}, Target: []string{"done"}, Refresh: w.RefreshFunc(), } + state.Delay = 10 * time.Second + state.Timeout = 10 * time.Minute + state.MinTimeout = 2 * time.Second + return state + } diff --git a/builtin/providers/google/resource_dns_record_set.go b/builtin/providers/google/resource_dns_record_set.go index 22f9c60c3..49a56d9b1 100644 --- a/builtin/providers/google/resource_dns_record_set.go +++ b/builtin/providers/google/resource_dns_record_set.go @@ -3,7 +3,6 @@ package google import ( "fmt" "log" - "time" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/dns/v1" @@ -15,6 +14,7 @@ func resourceDnsRecordSet() *schema.Resource { Create: resourceDnsRecordSetCreate, Read: resourceDnsRecordSetRead, Delete: resourceDnsRecordSetDelete, + Update: resourceDnsRecordSetUpdate, Schema: map[string]*schema.Schema{ "managed_zone": &schema.Schema{ @@ -32,7 +32,6 @@ func resourceDnsRecordSet() *schema.Resource { "rrdatas": &schema.Schema{ Type: schema.TypeList, Required: true, - ForceNew: true, Elem: &schema.Schema{ Type: schema.TypeString, }, @@ -41,13 +40,11 @@ func resourceDnsRecordSet() *schema.Resource { "ttl": &schema.Schema{ Type: schema.TypeInt, Required: true, - 
ForceNew: true, }, "type": &schema.Schema{ Type: schema.TypeString, Required: true, - ForceNew: true, }, "project": &schema.Schema{ @@ -69,8 +66,6 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error zone := d.Get("managed_zone").(string) - rrdatasCount := d.Get("rrdatas.#").(int) - // Build the change chg := &dns.Change{ Additions: []*dns.ResourceRecordSet{ @@ -78,16 +73,11 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error Name: d.Get("name").(string), Type: d.Get("type").(string), Ttl: int64(d.Get("ttl").(int)), - Rrdatas: make([]string, rrdatasCount), + Rrdatas: rrdata(d), }, }, } - for i := 0; i < rrdatasCount; i++ { - rrdata := fmt.Sprintf("rrdatas.%d", i) - chg.Additions[0].Rrdatas[i] = d.Get(rrdata).(string) - } - log.Printf("[DEBUG] DNS Record create request: %#v", chg) chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { @@ -102,11 +92,7 @@ func resourceDnsRecordSetCreate(d *schema.ResourceData, meta interface{}) error Project: project, ManagedZone: zone, } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - _, err = state.WaitForState() + _, err = w.Conf().WaitForState() if err != nil { return fmt.Errorf("Error waiting for Google DNS change: %s", err) } @@ -167,8 +153,6 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error zone := d.Get("managed_zone").(string) - rrdatasCount := d.Get("rrdatas.#").(int) - // Build the change chg := &dns.Change{ Deletions: []*dns.ResourceRecordSet{ @@ -176,15 +160,11 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error Name: d.Get("name").(string), Type: d.Get("type").(string), Ttl: int64(d.Get("ttl").(int)), - Rrdatas: make([]string, rrdatasCount), + Rrdatas: rrdata(d), }, }, } - for i := 0; i < rrdatasCount; i++ { - rrdata := fmt.Sprintf("rrdatas.%d", i) - chg.Deletions[0].Rrdatas[i] = d.Get(rrdata).(string) - } log.Printf("[DEBUG] DNS Record delete request: %#v", chg) chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() if err != nil { @@ -197,11 +177,7 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error Project: project, ManagedZone: zone, } - state := w.Conf() - state.Delay = 10 * time.Second - state.Timeout = 10 * time.Minute - state.MinTimeout = 2 * time.Second - _, err = state.WaitForState() + _, err = w.Conf().WaitForState() if err != nil { return fmt.Errorf("Error waiting for Google DNS change: %s", err) } @@ -209,3 +185,74 @@ func resourceDnsRecordSetDelete(d *schema.ResourceData, meta interface{}) error d.SetId("") return nil } + +func resourceDnsRecordSetUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + zone := d.Get("managed_zone").(string) + recordName := d.Get("name").(string) + + oldTtl, newTtl := d.GetChange("ttl") + oldType, newType := d.GetChange("type") + + oldCountRaw, _ := d.GetChange("rrdatas.#") + oldCount := oldCountRaw.(int) + + chg := &dns.Change{ + Deletions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: oldType.(string), + Ttl: int64(oldTtl.(int)), + Rrdatas: make([]string, oldCount), + }, + }, + Additions: []*dns.ResourceRecordSet{ + &dns.ResourceRecordSet{ + Name: recordName, + Type: newType.(string), + Ttl: int64(newTtl.(int)), + Rrdatas: rrdata(d), + }, + }, + } + + for i := 0; i < oldCount; i++ { + 
rrKey := fmt.Sprintf("rrdatas.%d", i) + oldRR, _ := d.GetChange(rrKey) + chg.Deletions[0].Rrdatas[i] = oldRR.(string) + } + log.Printf("[DEBUG] DNS Record change request: %#v old: %#v new: %#v", chg, chg.Deletions[0], chg.Additions[0]) + chg, err = config.clientDns.Changes.Create(project, zone, chg).Do() + if err != nil { + return fmt.Errorf("Error changing DNS RecordSet: %s", err) + } + + w := &DnsChangeWaiter{ + Service: config.clientDns, + Change: chg, + Project: project, + ManagedZone: zone, + } + if _, err = w.Conf().WaitForState(); err != nil { + return fmt.Errorf("Error waiting for Google DNS change: %s", err) + } + + return resourceDnsRecordSetRead(d, meta) +} + +func rrdata( + d *schema.ResourceData, +) []string { + rrdatasCount := d.Get("rrdatas.#").(int) + data := make([]string, rrdatasCount) + for i := 0; i < rrdatasCount; i++ { + data[i] = d.Get(fmt.Sprintf("rrdatas.%d", i)).(string) + } + return data +} diff --git a/builtin/providers/google/resource_dns_record_set_test.go b/builtin/providers/google/resource_dns_record_set_test.go index 94c7fce16..1a128b7d3 100644 --- a/builtin/providers/google/resource_dns_record_set_test.go +++ b/builtin/providers/google/resource_dns_record_set_test.go @@ -17,7 +17,64 @@ func TestAccDnsRecordSet_basic(t *testing.T) { CheckDestroy: testAccCheckDnsRecordSetDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccDnsRecordSet_basic(zoneName), + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_modify(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.11", 600), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + }, + }) +} + +func TestAccDnsRecordSet_changeType(t *testing.T) { + zoneName := fmt.Sprintf("dnszone-test-%s", acctest.RandString(10)) + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckDnsRecordSetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDnsRecordSet_basic(zoneName, "127.0.0.10", 300), + Check: resource.ComposeTestCheckFunc( + testAccCheckDnsRecordSetExists( + "google_dns_record_set.foobar", zoneName), + ), + }, + resource.TestStep{ + Config: testAccDnsRecordSet_bigChange(zoneName, 600), Check: resource.ComposeTestCheckFunc( testAccCheckDnsRecordSetExists( "google_dns_record_set.foobar", zoneName), @@ -65,20 +122,19 @@ func testAccCheckDnsRecordSetExists(resourceType, resourceName string) resource. 
if err != nil { return fmt.Errorf("Error confirming DNS RecordSet existence: %#v", err) } - if len(resp.Rrsets) == 0 { + switch len(resp.Rrsets) { + case 0: // The resource doesn't exist anymore return fmt.Errorf("DNS RecordSet not found") - } - - if len(resp.Rrsets) > 1 { + case 1: + return nil + default: return fmt.Errorf("Only expected 1 record set, got %d", len(resp.Rrsets)) } - - return nil } } -func testAccDnsRecordSet_basic(zoneName string) string { +func testAccDnsRecordSet_basic(zoneName string, addr2 string, ttl int) string { return fmt.Sprintf(` resource "google_dns_managed_zone" "parent-zone" { name = "%s" @@ -89,8 +145,25 @@ func testAccDnsRecordSet_basic(zoneName string) string { managed_zone = "${google_dns_managed_zone.parent-zone.name}" name = "test-record.terraform.test." type = "A" - rrdatas = ["127.0.0.1", "127.0.0.10"] - ttl = 600 + rrdatas = ["127.0.0.1", "%s"] + ttl = %d } - `, zoneName) + `, zoneName, addr2, ttl) +} + +func testAccDnsRecordSet_bigChange(zoneName string, ttl int) string { + return fmt.Sprintf(` + resource "google_dns_managed_zone" "parent-zone" { + name = "%s" + dns_name = "terraform.test." + description = "Test Description" + } + resource "google_dns_record_set" "foobar" { + managed_zone = "${google_dns_managed_zone.parent-zone.name}" + name = "test-record.terraform.test." + type = "CNAME" + rrdatas = ["www.terraform.io."] + ttl = %d + } + `, zoneName, ttl) } From 8e4e66a2607a2e831f90ecc1122d20b86e04c9da Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 12:37:09 +1200 Subject: [PATCH 0573/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d280bc3b..73de7cb5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,7 @@ IMPROVEMENTS * provider/aws: Query all pages of group membership [GH-6726] * provider/aws: Change the way ARNs are built [GH-7151] * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] + * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: From 8f0fdc9800b817fd360661ef5114e88feb6b62db Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 17:47:05 -0700 Subject: [PATCH 0574/1238] providers/google: Move URLMap hosts to TypeSet from TypeList (#7472) Using TypeSet allows host entries to be ordered arbitrarily in a manifest. 
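To see why the switch matters: helper/schema sets compare elements by hash rather than by position, so two sets holding the same members in a different order are equal and produce no diff. A minimal, self-contained sketch of that behaviour (illustrative only, not part of this patch; the hash function and hostnames are made up):

package main

import (
	"fmt"

	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/schema"
)

func main() {
	// Hash each element by its string value, the way set-typed schema fields do.
	hash := func(v interface{}) int { return hashcode.String(v.(string)) }

	a := schema.NewSet(hash, []interface{}{"a.example.com", "b.example.com"})
	b := schema.NewSet(hash, []interface{}{"b.example.com", "a.example.com"})

	// Equal compares membership, not order, so reordering host entries in a
	// manifest no longer shows up as a change.
	fmt.Println(a.Equal(b)) // prints: true
}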
--- .../google/resource_compute_url_map.go | 34 ++++++++++++++----- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/builtin/providers/google/resource_compute_url_map.go b/builtin/providers/google/resource_compute_url_map.go index 9caebb1cb..46f226248 100644 --- a/builtin/providers/google/resource_compute_url_map.go +++ b/builtin/providers/google/resource_compute_url_map.go @@ -40,8 +40,10 @@ func resourceComputeUrlMap() *schema.Resource { }, "host_rule": &schema.Schema{ - Type: schema.TypeList, + Type: schema.TypeSet, Optional: true, + // TODO(evandbrown): Enable when lists support validation + //ValidateFunc: validateHostRules, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "description": &schema.Schema{ @@ -258,10 +260,10 @@ func resourceComputeUrlMapCreate(d *schema.ResourceData, meta interface{}) error urlMap.Description = v.(string) } - _hostRules := d.Get("host_rule").([]interface{}) - urlMap.HostRules = make([]*compute.HostRule, len(_hostRules)) + _hostRules := d.Get("host_rule").(*schema.Set) + urlMap.HostRules = make([]*compute.HostRule, _hostRules.Len()) - for i, v := range _hostRules { + for i, v := range _hostRules.List() { urlMap.HostRules[i] = createHostRule(v) } @@ -332,7 +334,7 @@ func resourceComputeUrlMapRead(d *schema.ResourceData, meta interface{}) error { } /* Only read host rules into our TF state that we have defined */ - _hostRules := d.Get("host_rule").([]interface{}) + _hostRules := d.Get("host_rule").(*schema.Set).List() _newHostRules := make([]interface{}, 0) for _, v := range _hostRules { _hostRule := v.(map[string]interface{}) @@ -463,12 +465,12 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error _oldHostRulesMap := make(map[string]interface{}) _newHostRulesMap := make(map[string]interface{}) - for _, v := range _oldHostRules.([]interface{}) { + for _, v := range _oldHostRules.(*schema.Set).List() { _hostRule := v.(map[string]interface{}) _oldHostRulesMap[_hostRule["path_matcher"].(string)] = v } - for _, v := range _newHostRules.([]interface{}) { + for _, v := range _newHostRules.(*schema.Set).List() { _hostRule := v.(map[string]interface{}) _newHostRulesMap[_hostRule["path_matcher"].(string)] = v } @@ -515,7 +517,7 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error } /* Now add in the brand new entries */ - for host, _ := range _oldHostsSet { + for host, _ := range _newHostsSet { hostRule.Hosts = append(hostRule.Hosts, host) } @@ -644,7 +646,6 @@ func resourceComputeUrlMapUpdate(d *schema.ResourceData, meta interface{}) error urlMap.Tests = newTests } - op, err := config.clientCompute.UrlMaps.Update(project, urlMap.Name, urlMap).Do() if err != nil { @@ -684,3 +685,18 @@ func resourceComputeUrlMapDelete(d *schema.ResourceData, meta interface{}) error return nil } + +func validateHostRules(v interface{}, k string) (ws []string, es []error) { + pathMatchers := make(map[string]bool) + hostRules := v.([]interface{}) + for _, hri := range hostRules { + hr := hri.(map[string]interface{}) + pm := hr["path_matcher"].(string) + if pathMatchers[pm] { + es = append(es, fmt.Errorf("Multiple host_rule entries with the same path_matcher are not allowed. 
Please collapse all hosts with the same path_matcher into one host_rule")) + return + } + pathMatchers[pm] = true + } + return +} From 725d60ab578872fcc1f7c0a577e2f20d160aadc2 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 12:47:43 +1200 Subject: [PATCH 0575/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 73de7cb5c..4d57a7a79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ IMPROVEMENTS * provider/aws: Change the way ARNs are built [GH-7151] * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/google: allows atomic Cloud DNS record changes [GH-6575] + * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: From 70cadcf31dee6f9b4284a5d94d1d694ebb0da2ec Mon Sep 17 00:00:00 2001 From: Brad Sickles Date: Sun, 7 Aug 2016 20:56:44 -0400 Subject: [PATCH 0576/1238] Implement archive provider and "archive_file" resource. (#7322) --- builtin/bins/provider-archive/main.go | 12 ++ builtin/providers/archive/archive-content.zip | Bin 0 -> 161 bytes builtin/providers/archive/archive-dir.zip | Bin 0 -> 409 bytes builtin/providers/archive/archive-file.zip | Bin 0 -> 165 bytes builtin/providers/archive/archiver.go | 47 +++++ builtin/providers/archive/provider.go | 16 ++ builtin/providers/archive/provider_test.go | 18 ++ .../archive/resource_archive_file.go | 174 ++++++++++++++++++ .../archive/resource_archive_file_test.go | 92 +++++++++ .../archive/test-fixtures/test-dir/file1.txt | 1 + .../archive/test-fixtures/test-dir/file2.txt | 1 + .../archive/test-fixtures/test-dir/file3.txt | 1 + .../archive/test-fixtures/test-file.txt | 1 + builtin/providers/archive/zip_archiver.go | 107 +++++++++++ .../providers/archive/zip_archiver_test.go | 84 +++++++++ command/internal_plugin_list.go | 2 + .../providers/archive/index.html.markdown | 20 ++ .../docs/providers/archive/r/file.html.md | 45 +++++ website/source/layouts/archive.erb | 26 +++ 19 files changed, 647 insertions(+) create mode 100644 builtin/bins/provider-archive/main.go create mode 100644 builtin/providers/archive/archive-content.zip create mode 100644 builtin/providers/archive/archive-dir.zip create mode 100644 builtin/providers/archive/archive-file.zip create mode 100644 builtin/providers/archive/archiver.go create mode 100644 builtin/providers/archive/provider.go create mode 100644 builtin/providers/archive/provider_test.go create mode 100644 builtin/providers/archive/resource_archive_file.go create mode 100644 builtin/providers/archive/resource_archive_file_test.go create mode 100644 builtin/providers/archive/test-fixtures/test-dir/file1.txt create mode 100644 builtin/providers/archive/test-fixtures/test-dir/file2.txt create mode 100644 builtin/providers/archive/test-fixtures/test-dir/file3.txt create mode 100644 builtin/providers/archive/test-fixtures/test-file.txt create mode 100644 builtin/providers/archive/zip_archiver.go create mode 100644 builtin/providers/archive/zip_archiver_test.go create mode 100644 website/source/docs/providers/archive/index.html.markdown create mode 100644 website/source/docs/providers/archive/r/file.html.md create mode 100644 website/source/layouts/archive.erb diff --git a/builtin/bins/provider-archive/main.go b/builtin/bins/provider-archive/main.go new file mode 100644 index 000000000..994b5776b --- /dev/null +++ 
b/builtin/bins/provider-archive/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/archive" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: archive.Provider, + }) +} diff --git a/builtin/providers/archive/archive-content.zip b/builtin/providers/archive/archive-content.zip new file mode 100644 index 0000000000000000000000000000000000000000..be74d13a76673d3b9cc122fc98d8c510b0a61cae GIT binary patch literal 161 zcmWIWW@Zs#-~d8&zzq~g&d)1J%`4F>sVLz(c|s?QVbP;McS2g9{Mj?<${ryHpo;(h x1H9Qe?(JfEAPMA&0C9jfBa;Y_1v3*_4rC@OK(Zmgo0SbD$q0m&KspGl5&#BGA5;JU literal 0 HcmV?d00001 diff --git a/builtin/providers/archive/archive-dir.zip b/builtin/providers/archive/archive-dir.zip new file mode 100644 index 0000000000000000000000000000000000000000..1780db33a34206de310ff331f760123d58e9b3bb GIT binary patch literal 409 zcmWIWW@Zs#-~d8&zzGyc%gjkN)GMhd;W~LjCyZgymNRogSe+e!GXMVvc(ZeO1qyZv z19^Nv96-<{BXpCL5hi_LkPs%`Bx7`wk`X4Q8OWy~Ok!jbVL + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> From 0747c1b7b7bf9f120f7f614c7d587e2b9dc8f8dd Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 12:57:42 +1200 Subject: [PATCH 0577/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4d57a7a79..090e9a184 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,7 @@ ## 0.7.1 (Unreleased) FEATURES: + * **New Provider:** `archive` [GH-7322] * **New Resource:** `aws_vpn_gateway_attachment` [GH-7870] IMPROVEMENTS From 25b49dfd5132d674117767af207a9f029184284f Mon Sep 17 00:00:00 2001 From: Cameron Stokes Date: Sun, 7 Aug 2016 17:59:28 -0700 Subject: [PATCH 0578/1238] docs/commands: clarify -state w/ remote state (#8015) --- website/source/docs/commands/apply.html.markdown | 4 +++- website/source/docs/commands/import.html.md | 5 +++-- website/source/docs/commands/output.html.markdown | 1 + website/source/docs/commands/plan.html.markdown | 1 + website/source/docs/commands/refresh.html.markdown | 4 +++- website/source/docs/commands/state/list.html.md | 1 + website/source/docs/commands/state/mv.html.md | 4 +++- website/source/docs/commands/state/show.html.md | 1 + website/source/docs/commands/taint.html.markdown | 4 +++- website/source/docs/commands/untaint.html.markdown | 4 +++- 10 files changed, 22 insertions(+), 7 deletions(-) diff --git a/website/source/docs/commands/apply.html.markdown b/website/source/docs/commands/apply.html.markdown index 67626df2b..d1c46f154 100644 --- a/website/source/docs/commands/apply.html.markdown +++ b/website/source/docs/commands/apply.html.markdown @@ -43,9 +43,11 @@ The command-line flags are all optional. The list of available flags are: apply. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to write updated state file. By default, the - `-state` path will be used. + `-state` path will be used. Ignored when + [remote state](/docs/state/remote/index.html) is used. * `-target=resource` - A [Resource Address](/docs/internals/resource-addressing.html) to target. Operation will diff --git a/website/source/docs/commands/import.html.md b/website/source/docs/commands/import.html.md index 011da226c..72b9ce5b8 100644 --- a/website/source/docs/commands/import.html.md +++ b/website/source/docs/commands/import.html.md @@ -38,10 +38,11 @@ The command-line flags are all optional. 
The list of available flags are: * `-input=true` - Whether to ask for input for provider configuration. * `-state=path` - The path to read and save state files (unless state-out is - specified). + specified). Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to write the final state file. By default, this is - the state path. + the state path. Ignored when [remote state](/docs/state/remote/index.html) is + used. ## Provider Configuration diff --git a/website/source/docs/commands/output.html.markdown b/website/source/docs/commands/output.html.markdown index 7ddd7546a..d42f7cae2 100644 --- a/website/source/docs/commands/output.html.markdown +++ b/website/source/docs/commands/output.html.markdown @@ -24,6 +24,7 @@ The command-line flags are all optional. The list of available flags are: a key per output. If `NAME` is specified, only the output specified will be returned. This can be piped into tools such as `jq` for further processing. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-module=module_name` - The module path which has needed output. By default this is the root path. Other modules can be specified by a period-separated list. Example: "foo" would reference the module diff --git a/website/source/docs/commands/plan.html.markdown b/website/source/docs/commands/plan.html.markdown index df0987312..84732a5cc 100644 --- a/website/source/docs/commands/plan.html.markdown +++ b/website/source/docs/commands/plan.html.markdown @@ -51,6 +51,7 @@ The command-line flags are all optional. The list of available flags are: * `-refresh=true` - Update the state prior to checking for differences. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-target=resource` - A [Resource Address](/docs/internals/resource-addressing.html) to target. Operation will diff --git a/website/source/docs/commands/refresh.html.markdown b/website/source/docs/commands/refresh.html.markdown index 064583437..67d6c65bd 100644 --- a/website/source/docs/commands/refresh.html.markdown +++ b/website/source/docs/commands/refresh.html.markdown @@ -32,9 +32,11 @@ The command-line flags are all optional. The list of available flags are: * `-no-color` - Disables output with coloring * `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to write updated state file. By default, the - `-state` path will be used. + `-state` path will be used. Ignored when + [remote state](/docs/state/remote/index.html) is used. * `-target=resource` - A [Resource Address](/docs/internals/resource-addressing.html) to target. Operation will diff --git a/website/source/docs/commands/state/list.html.md b/website/source/docs/commands/state/list.html.md index e4350d483..4970028d0 100644 --- a/website/source/docs/commands/state/list.html.md +++ b/website/source/docs/commands/state/list.html.md @@ -30,6 +30,7 @@ in [resource addressing format](/docs/commands/state/addressing.html). The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. 
## Example: All Resources diff --git a/website/source/docs/commands/state/mv.html.md b/website/source/docs/commands/state/mv.html.md index e145fbbff..089bd1d91 100644 --- a/website/source/docs/commands/state/mv.html.md +++ b/website/source/docs/commands/state/mv.html.md @@ -47,10 +47,12 @@ The command-line flags are all optional. The list of available flags are: This is only necessary if `-state-out` is specified. * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to the state file to write to. If this isn't specified the state specified by `-state` will be used. This can be - a new or existing path. + a new or existing path. Ignored when + [remote state](/docs/state/remote/index.html) is used. ## Example: Rename a Resource diff --git a/website/source/docs/commands/state/show.html.md b/website/source/docs/commands/state/show.html.md index c55b90018..cda7ef378 100644 --- a/website/source/docs/commands/state/show.html.md +++ b/website/source/docs/commands/state/show.html.md @@ -30,6 +30,7 @@ in [resource addressing format](/docs/commands/state/addressing.html). The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. ## Example: Show a Resource diff --git a/website/source/docs/commands/taint.html.markdown b/website/source/docs/commands/taint.html.markdown index 05ea6301a..32ae60b4b 100644 --- a/website/source/docs/commands/taint.html.markdown +++ b/website/source/docs/commands/taint.html.markdown @@ -56,6 +56,8 @@ The command-line flags are all optional. The list of available flags are: * `-no-color` - Disables output with coloring * `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to write updated state file. By default, the - `-state` path will be used. + `-state` path will be used. Ignored when + [remote state](/docs/state/remote/index.html) is used. diff --git a/website/source/docs/commands/untaint.html.markdown b/website/source/docs/commands/untaint.html.markdown index 557460d32..7850c8714 100644 --- a/website/source/docs/commands/untaint.html.markdown +++ b/website/source/docs/commands/untaint.html.markdown @@ -56,6 +56,8 @@ certain cases, see above note). The list of available flags are: * `-no-color` - Disables output with coloring * `-state=path` - Path to read and write the state file to. Defaults to "terraform.tfstate". + Ignored when [remote state](/docs/state/remote/index.html) is used. * `-state-out=path` - Path to write updated state file. By default, the - `-state` path will be used. + `-state` path will be used. Ignored when + [remote state](/docs/state/remote/index.html) is used. From 3ac351637112820f75538ee30f3ee9b73ac7f4bd Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Sun, 7 Aug 2016 18:01:31 -0700 Subject: [PATCH 0579/1238] provider/google: Support static private IP addresses (#6310) * provider/google: Support static private IP addresses The private address of an instance's network interface may now be specified. If no value is provided, an address will be chosen by Google Compute Engine and that value will be read into Terraform state. 
* docs: GCE private static IP address information --- .../google/resource_compute_instance.go | 6 +- .../google/resource_compute_instance_test.go | 122 ++++++++++++++++++ .../google/r/compute_instance.html.markdown | 5 +- 3 files changed, 131 insertions(+), 2 deletions(-) diff --git a/builtin/providers/google/resource_compute_instance.go b/builtin/providers/google/resource_compute_instance.go index cb06822fa..9a4387b52 100644 --- a/builtin/providers/google/resource_compute_instance.go +++ b/builtin/providers/google/resource_compute_instance.go @@ -153,6 +153,8 @@ func resourceComputeInstance() *schema.Resource { "address": &schema.Schema{ Type: schema.TypeString, + Optional: true, + ForceNew: true, Computed: true, }, @@ -467,9 +469,10 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err networkInterfaces = make([]*compute.NetworkInterface, 0, networkInterfacesCount) for i := 0; i < networkInterfacesCount; i++ { prefix := fmt.Sprintf("network_interface.%d", i) - // Load up the name of this network_interfac + // Load up the name of this network_interface networkName := d.Get(prefix + ".network").(string) subnetworkName := d.Get(prefix + ".subnetwork").(string) + address := d.Get(prefix + ".address").(string) var networkLink, subnetworkLink string if networkName != "" && subnetworkName != "" { @@ -499,6 +502,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err var iface compute.NetworkInterface iface.Network = networkLink iface.Subnetwork = subnetworkLink + iface.NetworkIP = address // Handle access_config structs accessConfigsCount := d.Get(prefix + ".access_config.#").(int) diff --git a/builtin/providers/google/resource_compute_instance_test.go b/builtin/providers/google/resource_compute_instance_test.go index c133b97e7..a20e127e1 100644 --- a/builtin/providers/google/resource_compute_instance_test.go +++ b/builtin/providers/google/resource_compute_instance_test.go @@ -371,6 +371,47 @@ func TestAccComputeInstance_subnet_custom(t *testing.T) { }) } +func TestAccComputeInstance_address_auto(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_auto(instanceName), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAnyAddress(&instance), + ), + }, + }, + }) +} + +func TestAccComputeInstance_address_custom(t *testing.T) { + var instance compute.Instance + var instanceName = fmt.Sprintf("instance-test-%s", acctest.RandString(10)) + var address = "10.0.200.200" + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstance_address_custom(instanceName, address), + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeInstanceExists( + "google_compute_instance.foobar", &instance), + testAccCheckComputeInstanceHasAddress(&instance, address), + ), + }, + }, + }) +} func testAccCheckComputeInstanceDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) @@ -528,6 +569,30 @@ func 
testAccCheckComputeInstanceHasSubnet(instance *compute.Instance) resource.T } } +func testAccCheckComputeInstanceHasAnyAddress(instance *compute.Instance) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP == "" { + return fmt.Errorf("no address") + } + } + + return nil + } +} + +func testAccCheckComputeInstanceHasAddress(instance *compute.Instance, address string) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, i := range instance.NetworkInterfaces { + if i.NetworkIP != address { + return fmt.Errorf("Wrong address found: expected %v, got %v", address, i.NetworkIP) + } + } + + return nil + } +} + func testAccComputeInstance_basic_deprecated_network(instance string) string { return fmt.Sprintf(` resource "google_compute_instance" "foobar" { @@ -880,3 +945,60 @@ func testAccComputeInstance_subnet_custom(instance string) string { }`, acctest.RandString(10), acctest.RandString(10), instance) } + +func testAccComputeInstance_address_auto(instance string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20160301" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance) +} + +func testAccComputeInstance_address_custom(instance, address string) string { + return fmt.Sprintf(` + resource "google_compute_network" "inst-test-network" { + name = "inst-test-network-%s" + } + resource "google_compute_subnetwork" "inst-test-subnetwork" { + name = "inst-test-subnetwork-%s" + ip_cidr_range = "10.0.0.0/16" + region = "us-central1" + network = "${google_compute_network.inst-test-network.self_link}" + } + resource "google_compute_instance" "foobar" { + name = "%s" + machine_type = "n1-standard-1" + zone = "us-central1-a" + + disk { + image = "debian-7-wheezy-v20160301" + } + + network_interface { + subnetwork = "${google_compute_subnetwork.inst-test-subnetwork.name}" + address = "%s" + access_config { } + } + + }`, acctest.RandString(10), acctest.RandString(10), instance, address) +} diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown index 77f89f360..3164f1061 100644 --- a/website/source/docs/providers/google/r/compute_instance.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance.html.markdown @@ -139,6 +139,9 @@ The `network_interface` block supports: to. The subnetwork must exist in the same region this instance will be created in. Either `network` or `subnetwork` must be provided. +* `address` - (Optional) The private IP address to assign to the instance. If + empty, the address will be automatically assigned. + * `access_config` - (Optional) Access configurations, i.e. IPs via which this instance can be accessed via the Internet. 
Omit to ensure that the instance is not accessible from the Internet (this means that ssh provisioners will @@ -189,7 +192,7 @@ exported: * `tags_fingerprint` - The unique fingerprint of the tags. -* `network_interface.0.address` - The internal ip address of the instance (usually on the 10.x.x.x range). +* `network_interface.0.address` - The internal ip address of the instance, either manually or dynamically assigned. * `network_interface.0.access_config.0.assigned_nat_ip` - If the instance has an access config, either the given external ip (in the `nat_ip` field) or the ephemeral (generated) ip (if you didn't provide one). From 23560bab6f4865915229c231f30314c83746ad21 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 13:02:16 +1200 Subject: [PATCH 0580/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 090e9a184..8a076a066 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ IMPROVEMENTS * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] + * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] BUG FIXES: From e3b7760af4fca72db30d1412508c8e78b9908c67 Mon Sep 17 00:00:00 2001 From: Carlos Sanchez Date: Mon, 8 Aug 2016 03:30:14 +0200 Subject: [PATCH 0581/1238] [AWS] Retry AttachInternetGateway and increase timeout (#7891) --- .../aws/resource_aws_internet_gateway.go | 20 +++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_internet_gateway.go b/builtin/providers/aws/resource_aws_internet_gateway.go index fe9ed3ebd..a6c576386 100644 --- a/builtin/providers/aws/resource_aws_internet_gateway.go +++ b/builtin/providers/aws/resource_aws_internet_gateway.go @@ -168,9 +168,21 @@ func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) d.Id(), d.Get("vpc_id").(string)) - _, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ - InternetGatewayId: aws.String(d.Id()), - VpcId: aws.String(d.Get("vpc_id").(string)), + err := resource.Retry(2*time.Minute, func() *resource.RetryError { + _, err := conn.AttachInternetGateway(&ec2.AttachInternetGatewayInput{ + InternetGatewayId: aws.String(d.Id()), + VpcId: aws.String(d.Get("vpc_id").(string)), + }) + if err == nil { + return nil + } + if ec2err, ok := err.(awserr.Error); ok { + switch ec2err.Code() { + case "InvalidInternetGatewayID.NotFound": + return resource.RetryableError(err) // retry + } + } + return resource.NonRetryableError(err) }) if err != nil { return err @@ -187,7 +199,7 @@ func resourceAwsInternetGatewayAttach(d *schema.ResourceData, meta interface{}) Pending: []string{"detached", "attaching"}, Target: []string{"available"}, Refresh: IGAttachStateRefreshFunc(conn, d.Id(), "available"), - Timeout: 1 * time.Minute, + Timeout: 4 * time.Minute, } if _, err := stateConf.WaitForState(); err != nil { return fmt.Errorf( From 8c3ab69253a76bb189e7311bc4298a411972fd4a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 13:50:34 +1200 Subject: [PATCH 0582/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a076a066..8621fb422 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -9,6 +9,7 @@ IMPROVEMENTS * provider/aws: Query all pages of group membership [GH-6726] * provider/aws: Change the way ARNs are built [GH-7151] * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] + * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` [GH-7891] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] From 57d3c722e29482e2243e3a4786242bca91481c21 Mon Sep 17 00:00:00 2001 From: "Ernest W. Durbin III" Date: Sun, 7 Aug 2016 22:34:26 -0400 Subject: [PATCH 0583/1238] rename aws load balancer policy resources team redundancy team had a good run, but is over now --- builtin/providers/aws/provider.go | 6 +-- ...ws_load_balancer_backend_server_policy.go} | 0 ...ad_balancer_backend_server_policy_test.go} | 48 +++++++++---------- ...urce_aws_load_balancer_listener_policy.go} | 0 ...aws_load_balancer_listener_policy_test.go} | 32 ++++++------- ...o => resource_aws_load_balancer_policy.go} | 0 ...resource_aws_load_balancer_policy_test.go} | 28 +++++------ ...ancer_backend_server_policy.html.markdown} | 14 +++--- ...ad_balancer_listener_policy.html.markdown} | 10 ++-- ...own => load_balancer_policy.html.markdown} | 20 ++++---- 10 files changed, 79 insertions(+), 79 deletions(-) rename builtin/providers/aws/{resource_aws_elb_load_balancer_backend_server_policy.go => resource_aws_load_balancer_backend_server_policy.go} (100%) rename builtin/providers/aws/{resource_aws_elb_load_balancer_backend_server_policy_test.go => resource_aws_load_balancer_backend_server_policy_test.go} (87%) rename builtin/providers/aws/{resource_aws_elb_load_balancer_listener_policy.go => resource_aws_load_balancer_listener_policy.go} (100%) rename builtin/providers/aws/{resource_aws_elb_load_balancer_listener_policy_test.go => resource_aws_load_balancer_listener_policy_test.go} (83%) rename builtin/providers/aws/{resource_aws_elb_load_balancer_policy.go => resource_aws_load_balancer_policy.go} (100%) rename builtin/providers/aws/{resource_aws_elb_load_balancer_policy_test.go => resource_aws_load_balancer_policy_test.go} (89%) rename website/source/docs/providers/aws/r/{elb_load_balancer_backend_server_policy.html.markdown => load_balancer_backend_server_policy.html.markdown} (77%) rename website/source/docs/providers/aws/r/{elb_load_balancer_listener_policy.html.markdown => load_balancer_listener_policy.html.markdown} (82%) rename website/source/docs/providers/aws/r/{elb_load_balancer_policy.html.markdown => load_balancer_policy.html.markdown} (77%) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index ce141b005..afdbf23ba 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -215,9 +215,9 @@ func Provider() terraform.ResourceProvider { "aws_lambda_permission": resourceAwsLambdaPermission(), "aws_launch_configuration": resourceAwsLaunchConfiguration(), "aws_lb_cookie_stickiness_policy": resourceAwsLBCookieStickinessPolicy(), - "aws_elb_load_balancer_policy": resourceAwsLoadBalancerPolicy(), - "aws_elb_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), - "aws_elb_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), + "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), + 
"aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), + "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go similarity index 100% rename from builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy.go rename to builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go similarity index 87% rename from builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go rename to builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go index 0bb3f6934..bf783bb00 100644 --- a/builtin/providers/aws/resource_aws_elb_load_balancer_backend_server_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go @@ -25,24 +25,24 @@ func TestAccAWSLoadBalancerBackendServerPolicy_basic(t *testing.T) { resource.TestStep{ Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic0, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy0"), - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-backend-auth-policy0"), - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", true), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-pubkey-policy0"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-backend-auth-policy0"), + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", "test-backend-auth-policy0", true), ), }, resource.TestStep{ Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic1, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy0"), - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-pubkey-policy1"), - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-backend-auth-policy0"), - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", true), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-pubkey-policy0"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-pubkey-policy1"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-backend-auth-policy0"), + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", "test-backend-auth-policy0", true), ), }, resource.TestStep{ Config: testAccAWSLoadBalancerBackendServerPolicyConfig_basic2, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-elb-policies-lb", "test-backend-auth-policy0", false), + testAccCheckAWSLoadBalancerBackendServerPolicyState("test-aws-policies-lb", 
"test-backend-auth-policy0", false), ), }, }, @@ -63,7 +63,7 @@ func testAccCheckAWSLoadBalancerBackendServerPolicyDestroy(s *terraform.State) e for _, rs := range s.RootModule().Resources { switch { - case rs.Type == "aws_elb_load_balancer_policy": + case rs.Type == "aws_load_balancer_policy": loadBalancerName, policyName := resourceAwsLoadBalancerBackendServerPoliciesParseId(rs.Primary.ID) out, err := conn.DescribeLoadBalancerPolicies( &elb.DescribeLoadBalancerPoliciesInput{ @@ -79,7 +79,7 @@ func testAccCheckAWSLoadBalancerBackendServerPolicyDestroy(s *terraform.State) e if len(out.PolicyDescriptions) > 0 { return fmt.Errorf("Policy still exists") } - case rs.Type == "aws_elb_load_balancer_backend_policy": + case rs.Type == "aws_load_balancer_backend_policy": loadBalancerName, policyName := resourceAwsLoadBalancerBackendServerPoliciesParseId(rs.Primary.ID) out, err := conn.DescribeLoadBalancers( &elb.DescribeLoadBalancersInput{ @@ -166,7 +166,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -182,7 +182,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "test-pubkey-policy0" { +resource "aws_load_balancer_policy" "test-pubkey-policy0" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-pubkey-policy0" policy_type_name = "PublicKeyPolicyType" @@ -192,21 +192,21 @@ resource "aws_elb_load_balancer_policy" "test-pubkey-policy0" { } } -resource "aws_elb_load_balancer_policy" "test-backend-auth-policy0" { +resource "aws_load_balancer_policy" "test-backend-auth-policy0" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-backend-auth-policy0" policy_type_name = "BackendServerAuthenticationPolicyType" policy_attribute = { name = "PublicKeyPolicyName" - value = "${aws_elb_load_balancer_policy.test-pubkey-policy0.policy_name}" + value = "${aws_load_balancer_policy.test-pubkey-policy0.policy_name}" } } -resource "aws_elb_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { +resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { load_balancer_name = "${aws_elb.test-lb.name}" instance_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.test-backend-auth-policy0.policy_name}" + "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" ] } ` @@ -263,7 +263,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -279,7 +279,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "test-pubkey-policy0" { +resource "aws_load_balancer_policy" "test-pubkey-policy0" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-pubkey-policy0" policy_type_name = "PublicKeyPolicyType" @@ -289,7 +289,7 @@ resource "aws_elb_load_balancer_policy" "test-pubkey-policy0" { } } -resource "aws_elb_load_balancer_policy" "test-pubkey-policy1" { +resource "aws_load_balancer_policy" "test-pubkey-policy1" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-pubkey-policy1" policy_type_name = "PublicKeyPolicyType" @@ -299,21 +299,21 @@ resource "aws_elb_load_balancer_policy" "test-pubkey-policy1" { } } -resource "aws_elb_load_balancer_policy" "test-backend-auth-policy0" { +resource "aws_load_balancer_policy" 
"test-backend-auth-policy0" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-backend-auth-policy0" policy_type_name = "BackendServerAuthenticationPolicyType" policy_attribute = { name = "PublicKeyPolicyName" - value = "${aws_elb_load_balancer_policy.test-pubkey-policy1.policy_name}" + value = "${aws_load_balancer_policy.test-pubkey-policy1.policy_name}" } } -resource "aws_elb_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { +resource "aws_load_balancer_backend_server_policy" "test-backend-auth-policies-443" { load_balancer_name = "${aws_elb.test-lb.name}" instance_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.test-backend-auth-policy0.policy_name}" + "${aws_load_balancer_policy.test-backend-auth-policy0.policy_name}" ] } ` @@ -370,7 +370,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { } resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go similarity index 100% rename from builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy.go rename to builtin/providers/aws/resource_aws_load_balancer_listener_policy.go diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go similarity index 83% rename from builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go rename to builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go index 4bc7ac17f..9f3c02105 100644 --- a/builtin/providers/aws/resource_aws_elb_load_balancer_listener_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go @@ -22,21 +22,21 @@ func TestAccAWSLoadBalancerListenerPolicy_basic(t *testing.T) { resource.TestStep{ Config: testAccAWSLoadBalancerListenerPolicyConfig_basic0, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.magic-cookie-sticky"), - testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", true), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"), + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", true), ), }, resource.TestStep{ Config: testAccAWSLoadBalancerListenerPolicyConfig_basic1, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.magic-cookie-sticky"), - testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", true), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.magic-cookie-sticky"), + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", true), ), }, resource.TestStep{ Config: testAccAWSLoadBalancerListenerPolicyConfig_basic2, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-elb-policies-lb", int64(80), "magic-cookie-sticky-policy", false), + testAccCheckAWSLoadBalancerListenerPolicyState("test-aws-policies-lb", int64(80), "magic-cookie-sticky-policy", false), ), }, }, @@ 
-57,7 +57,7 @@ func testAccCheckAWSLoadBalancerListenerPolicyDestroy(s *terraform.State) error for _, rs := range s.RootModule().Resources { switch { - case rs.Type == "aws_elb_load_balancer_policy": + case rs.Type == "aws_load_balancer_policy": loadBalancerName, policyName := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) out, err := conn.DescribeLoadBalancerPolicies( &elb.DescribeLoadBalancerPoliciesInput{ @@ -73,7 +73,7 @@ func testAccCheckAWSLoadBalancerListenerPolicyDestroy(s *terraform.State) error if len(out.PolicyDescriptions) > 0 { return fmt.Errorf("Policy still exists") } - case rs.Type == "aws_elb_load_listener_policy": + case rs.Type == "aws_load_listener_policy": loadBalancerName, _ := resourceAwsLoadBalancerListenerPoliciesParseId(rs.Primary.ID) out, err := conn.DescribeLoadBalancers( &elb.DescribeLoadBalancersInput{ @@ -144,7 +144,7 @@ func testAccCheckAWSLoadBalancerListenerPolicyState(loadBalancerName string, loa const testAccAWSLoadBalancerListenerPolicyConfig_basic0 = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -159,7 +159,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { +resource "aws_load_balancer_policy" "magic-cookie-sticky" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "magic-cookie-sticky-policy" policy_type_name = "AppCookieStickinessPolicyType" @@ -169,18 +169,18 @@ resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { } } -resource "aws_elb_load_balancer_listener_policy" "test-lb-listener-policies-80" { +resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { load_balancer_name = "${aws_elb.test-lb.name}" load_balancer_port = 80 policy_names = [ - "${aws_elb_load_balancer_policy.magic-cookie-sticky.policy_name}", + "${aws_load_balancer_policy.magic-cookie-sticky.policy_name}", ] } ` const testAccAWSLoadBalancerListenerPolicyConfig_basic1 = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -195,7 +195,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { +resource "aws_load_balancer_policy" "magic-cookie-sticky" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "magic-cookie-sticky-policy" policy_type_name = "AppCookieStickinessPolicyType" @@ -205,18 +205,18 @@ resource "aws_elb_load_balancer_policy" "magic-cookie-sticky" { } } -resource "aws_elb_load_balancer_listener_policy" "test-lb-listener-policies-80" { +resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { load_balancer_name = "${aws_elb.test-lb.name}" load_balancer_port = 80 policy_names = [ - "${aws_elb_load_balancer_policy.magic-cookie-sticky.policy_name}" + "${aws_load_balancer_policy.magic-cookie-sticky.policy_name}" ] } ` const testAccAWSLoadBalancerListenerPolicyConfig_basic2 = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { diff --git a/builtin/providers/aws/resource_aws_elb_load_balancer_policy.go b/builtin/providers/aws/resource_aws_load_balancer_policy.go similarity index 100% rename from builtin/providers/aws/resource_aws_elb_load_balancer_policy.go rename to builtin/providers/aws/resource_aws_load_balancer_policy.go diff --git 
a/builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_policy_test.go similarity index 89% rename from builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go rename to builtin/providers/aws/resource_aws_load_balancer_policy_test.go index 8ce166869..29771f789 100644 --- a/builtin/providers/aws/resource_aws_elb_load_balancer_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_policy_test.go @@ -23,7 +23,7 @@ func TestAccAWSLoadBalancerPolicy_basic(t *testing.T) { resource.TestStep{ Config: testAccAWSLoadBalancerPolicyConfig_basic, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), ), }, }, @@ -39,13 +39,13 @@ func TestAccAWSLoadBalancerPolicy_updateWhileAssigned(t *testing.T) { resource.TestStep{ Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), ), }, resource.TestStep{ Config: testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1, Check: resource.ComposeTestCheckFunc( - testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_elb_load_balancer_policy.test-policy"), + testAccCheckAWSLoadBalancerPolicyState("aws_elb.test-lb", "aws_load_balancer_policy.test-policy"), ), }, }, @@ -56,7 +56,7 @@ func testAccCheckAWSLoadBalancerPolicyDestroy(s *terraform.State) error { conn := testAccProvider.Meta().(*AWSClient).elbconn for _, rs := range s.RootModule().Resources { - if rs.Type != "aws_elb_load_balancer_policy" { + if rs.Type != "aws_load_balancer_policy" { continue } @@ -141,7 +141,7 @@ func testAccCheckAWSLoadBalancerPolicyState(elbResource string, policyResource s const testAccAWSLoadBalancerPolicyConfig_basic = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -156,7 +156,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "test-policy" { +resource "aws_load_balancer_policy" "test-policy" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-policy-policy" policy_type_name = "AppCookieStickinessPolicyType" @@ -169,7 +169,7 @@ resource "aws_elb_load_balancer_policy" "test-policy" { const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0 = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -184,7 +184,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "test-policy" { +resource "aws_load_balancer_policy" "test-policy" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-policy-policy" policy_type_name = "AppCookieStickinessPolicyType" @@ -194,18 +194,18 @@ resource "aws_elb_load_balancer_policy" "test-policy" { } } -resource "aws_elb_load_balancer_listener_policy" "test-lb-test-policy-80" { +resource "aws_load_balancer_listener_policy" "test-lb-test-policy-80" { load_balancer_name = "${aws_elb.test-lb.name}" load_balancer_port = 80 policy_names = [ - "${aws_elb_load_balancer_policy.test-policy.policy_name}" + 
"${aws_load_balancer_policy.test-policy.policy_name}" ] } ` const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1 = ` resource "aws_elb" "test-lb" { - name = "test-aws-elb-policies-lb" + name = "test-aws-policies-lb" availability_zones = ["us-east-1a"] listener { @@ -220,7 +220,7 @@ resource "aws_elb" "test-lb" { } } -resource "aws_elb_load_balancer_policy" "test-policy" { +resource "aws_load_balancer_policy" "test-policy" { load_balancer_name = "${aws_elb.test-lb.name}" policy_name = "test-policy-policy" policy_type_name = "AppCookieStickinessPolicyType" @@ -230,11 +230,11 @@ resource "aws_elb_load_balancer_policy" "test-policy" { } } -resource "aws_elb_load_balancer_listener_policy" "test-lb-test-policy-80" { +resource "aws_load_balancer_listener_policy" "test-lb-test-policy-80" { load_balancer_name = "${aws_elb.test-lb.name}" load_balancer_port = 80 policy_names = [ - "${aws_elb_load_balancer_policy.test-policy.policy_name}" + "${aws_load_balancer_policy.test-policy.policy_name}" ] } ` diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown similarity index 77% rename from website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown rename to website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown index 5c06bbdf8..b135bda26 100644 --- a/website/source/docs/providers/aws/r/elb_load_balancer_backend_server_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown @@ -1,7 +1,7 @@ --- layout: "aws" -page_title: "AWS: aws_elb_load_balancer_backend_server_policy" -sidebar_current: "docs-aws-resource-elb-load-balancer-backend-server-policy" +page_title: "AWS: aws_load_balancer_backend_server_policy" +sidebar_current: "docs-aws-resource-load-balancer-backend-server-policy" description: |- Attaches a load balancer policy to an ELB backend server. 
--- @@ -31,7 +31,7 @@ resource "aws_elb" "wu-tang" { } } -resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { +resource "aws_load_balancer_policy" "wu-tang-ca-pubkey-policy" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-ca-pubkey-policy" policy_type_name = "PublicKeyPolicyType" @@ -41,21 +41,21 @@ resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { } } -resource "aws_elb_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { +resource "aws_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-root-ca-backend-auth-policy" policy_type_name = "BackendServerAuthenticationPolicyType" policy_attribute = { name = "PublicKeyPolicyName" - value = "${aws_elb_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" + value = "${aws_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" } } -resource "aws_elb_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { +resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { load_balancer_name = "${aws_elb.wu-tang.name}" instance_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" + "${aws_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" ] } ``` diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown similarity index 82% rename from website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown rename to website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown index 37e388c1b..5248ee6e2 100644 --- a/website/source/docs/providers/aws/r/elb_load_balancer_listener_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown @@ -1,7 +1,7 @@ --- layout: "aws" -page_title: "AWS: aws_elb_load_balancer_listener_policy" -sidebar_current: "docs-aws-resource-elb-load-balancer-listener-policy" +page_title: "AWS: aws_load_balancer_listener_policy" +sidebar_current: "docs-aws-resource-load-balancer-listener-policy" description: |- Attaches a load balancer policy to an ELB Listener. 
--- @@ -31,7 +31,7 @@ resource "aws_elb" "wu-tang" { } } -resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { +resource "aws_load_balancer_policy" "wu-tang-ssl" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-ssl" policy_type_name = "SSLNegotiationPolicyType" @@ -45,11 +45,11 @@ resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { } } -resource "aws_elb_load_balancer_listener_policy" "wu-tang-listener-policies-443" { +resource "aws_load_balancer_listener_policy" "wu-tang-listener-policies-443" { load_balancer_name = "${aws_elb.wu-tang.name}" load_balancer_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.wu-tang-ssl.policy_name}" + "${aws_load_balancer_policy.wu-tang-ssl.policy_name}" ] } ``` diff --git a/website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown similarity index 77% rename from website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown rename to website/source/docs/providers/aws/r/load_balancer_policy.html.markdown index a5d44676d..04978aaef 100644 --- a/website/source/docs/providers/aws/r/elb_load_balancer_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown @@ -1,7 +1,7 @@ --- layout: "aws" -page_title: "AWS: aws_elb_load_balancer_policy" -sidebar_current: "docs-aws-resource-elb-load-balancer-policy" +page_title: "AWS: aws_load_balancer_policy" +sidebar_current: "docs-aws-resource-load-balancer-policy" description: |- Provides a load balancer policy, which can be attached to an ELB listener or backend server. --- @@ -30,7 +30,7 @@ resource "aws_elb" "wu-tang" { } } -resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { +resource "aws_load_balancer_policy" "wu-tang-ca-pubkey-policy" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-ca-pubkey-policy" policy_type_name = "PublicKeyPolicyType" @@ -40,17 +40,17 @@ resource "aws_elb_load_balancer_policy" "wu-tang-ca-pubkey-policy" { } } -resource "aws_elb_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { +resource "aws_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-root-ca-backend-auth-policy" policy_type_name = "BackendServerAuthenticationPolicyType" policy_attribute = { name = "PublicKeyPolicyName" - value = "${aws_elb_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" + value = "${aws_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}" } } -resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { +resource "aws_load_balancer_policy" "wu-tang-ssl" { load_balancer_name = "${aws_elb.wu-tang.name}" policy_name = "wu-tang-ssl" policy_type_name = "SSLNegotiationPolicyType" @@ -64,19 +64,19 @@ resource "aws_elb_load_balancer_policy" "wu-tang-ssl" { } } -resource "aws_elb_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { +resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" { load_balancer_name = "${aws_elb.wu-tang.name}" instance_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" + "${aws_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}" ] } -resource "aws_elb_load_balancer_listener_policy" "wu-tang-listener-policies-443" { +resource "aws_load_balancer_listener_policy" "wu-tang-listener-policies-443" { load_balancer_name = 
"${aws_elb.wu-tang.name}" load_balancer_port = 443 policy_names = [ - "${aws_elb_load_balancer_policy.wu-tang-ssl.policy_name}" + "${aws_load_balancer_policy.wu-tang-ssl.policy_name}" ] } ``` From 4f07b924479759b47ab6df4baa906bbee5b75371 Mon Sep 17 00:00:00 2001 From: "Ernest W. Durbin III" Date: Sun, 7 Aug 2016 23:08:00 -0400 Subject: [PATCH 0584/1238] address feedback from @stack72 on PR #7458 as of Mon Aug 8 03:08:21 UTC 2016 --- .../resource_aws_load_balancer_backend_server_policy.go | 7 +++---- .../aws/resource_aws_load_balancer_listener_policy.go | 7 +++---- builtin/providers/aws/resource_aws_load_balancer_policy.go | 6 ++++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go index 0bdd85f94..d3759ffcb 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go +++ b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go @@ -60,10 +60,7 @@ func resourceAwsLoadBalancerBackendServerPoliciesCreate(d *schema.ResourceData, } d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.InstancePort, 10))) - d.Set("load_balancer_name", setOpts.LoadBalancerName) - d.Set("instance_port", setOpts.InstancePort) - d.Set("policy_names", flattenStringList(setOpts.PolicyNames)) - return nil + return resourceAwsLoadBalancerBackendServerPoliciesRead(d, meta) } func resourceAwsLoadBalancerBackendServerPoliciesRead(d *schema.ResourceData, meta interface{}) error { @@ -81,6 +78,7 @@ func resourceAwsLoadBalancerBackendServerPoliciesRead(d *schema.ResourceData, me if ec2err, ok := err.(awserr.Error); ok { if ec2err.Code() == "LoadBalancerNotFound" { return fmt.Errorf("LoadBalancerNotFound: %s", err) + d.SetId("") } } return fmt.Errorf("Error retrieving ELB description: %s", err) @@ -130,6 +128,7 @@ func resourceAwsLoadBalancerBackendServerPoliciesDelete(d *schema.ResourceData, return fmt.Errorf("Error setting LoadBalancerPoliciesForBackendServer: %s", err) } + d.SetId("") return nil } diff --git a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go index 494b8bd98..2a13be1c3 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go +++ b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go @@ -60,10 +60,7 @@ func resourceAwsLoadBalancerListenerPoliciesCreate(d *schema.ResourceData, meta } d.SetId(fmt.Sprintf("%s:%s", *setOpts.LoadBalancerName, strconv.FormatInt(*setOpts.LoadBalancerPort, 10))) - d.Set("load_balancer_name", setOpts.LoadBalancerName) - d.Set("load_balancer_port", setOpts.LoadBalancerPort) - d.Set("policy_names", flattenStringList(setOpts.PolicyNames)) - return nil + return resourceAwsLoadBalancerListenerPoliciesRead(d, meta) } func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta interface{}) error { @@ -81,6 +78,7 @@ func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta in if ec2err, ok := err.(awserr.Error); ok { if ec2err.Code() == "LoadBalancerNotFound" { return fmt.Errorf("LoadBalancerNotFound: %s", err) + d.SetId("") } } return fmt.Errorf("Error retrieving ELB description: %s", err) @@ -130,6 +128,7 @@ func resourceAwsLoadBalancerListenerPoliciesDelete(d *schema.ResourceData, meta return fmt.Errorf("Error setting LoadBalancerPoliciesOfListener: %s", err) } + d.SetId("") return nil } diff 
--git a/builtin/providers/aws/resource_aws_load_balancer_policy.go b/builtin/providers/aws/resource_aws_load_balancer_policy.go index 1d1219c80..8305cf992 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_policy.go +++ b/builtin/providers/aws/resource_aws_load_balancer_policy.go @@ -86,7 +86,7 @@ func resourceAwsLoadBalancerPolicyCreate(d *schema.ResourceData, meta interface{ d.SetId(fmt.Sprintf("%s:%s", *lbspOpts.LoadBalancerName, *lbspOpts.PolicyName)) - return nil + return resourceAwsLoadBalancerPolicyRead(d, meta) } func resourceAwsLoadBalancerPolicyRead(d *schema.ResourceData, meta interface{}) error { @@ -176,7 +176,7 @@ func resourceAwsLoadBalancerPolicyUpdate(d *schema.ResourceData, meta interface{ } } - return nil + return resourceAwsLoadBalancerPolicyRead(d, meta) } func resourceAwsLoadBalancerPolicyDelete(d *schema.ResourceData, meta interface{}) error { @@ -204,6 +204,8 @@ func resourceAwsLoadBalancerPolicyDelete(d *schema.ResourceData, meta interface{ if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { return fmt.Errorf("Error deleting Load Balancer Policy %s: %s", d.Id(), err) } + + d.SetId("") return nil } From 7036d9e1c40d53dfa8926058bd51caa105abca9d Mon Sep 17 00:00:00 2001 From: "Ernest W. Durbin III" Date: Sun, 7 Aug 2016 23:19:57 -0400 Subject: [PATCH 0585/1238] d.SetId before return, may be up too late. pass `go tool vet` (great resource by the way) --- .../aws/resource_aws_load_balancer_backend_server_policy.go | 2 +- .../providers/aws/resource_aws_load_balancer_listener_policy.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go index d3759ffcb..325c4fd1a 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go +++ b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy.go @@ -77,8 +77,8 @@ func resourceAwsLoadBalancerBackendServerPoliciesRead(d *schema.ResourceData, me if err != nil { if ec2err, ok := err.(awserr.Error); ok { if ec2err.Code() == "LoadBalancerNotFound" { - return fmt.Errorf("LoadBalancerNotFound: %s", err) d.SetId("") + return fmt.Errorf("LoadBalancerNotFound: %s", err) } } return fmt.Errorf("Error retrieving ELB description: %s", err) diff --git a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go index 2a13be1c3..d1c8cacbb 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go +++ b/builtin/providers/aws/resource_aws_load_balancer_listener_policy.go @@ -77,8 +77,8 @@ func resourceAwsLoadBalancerListenerPoliciesRead(d *schema.ResourceData, meta in if err != nil { if ec2err, ok := err.(awserr.Error); ok { if ec2err.Code() == "LoadBalancerNotFound" { - return fmt.Errorf("LoadBalancerNotFound: %s", err) d.SetId("") + return fmt.Errorf("LoadBalancerNotFound: %s", err) } } return fmt.Errorf("Error retrieving ELB description: %s", err) From d9277a71be82f41bc9c39c8ec2ba0e5eff920631 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 8 Aug 2016 15:39:27 +1200 Subject: [PATCH 0586/1238] provider/azurerm: Addition of documentation for the new Storage Blob upload options --- .../docs/providers/azurerm/r/storage_blob.html.markdown | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown 
b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown index c216c0633..9c8a3e7ea 100644 --- a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown @@ -60,7 +60,13 @@ The following arguments are supported: * `type` - (Required) The type of the storage blob to be created. One of either `block` or `page`. -* `size` - (Optional) Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0. +* `size` - (Optional) Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0. + +* `source` - (Optional) An absolute path to a file on the local system + +* `parallelism` - (Optional) The number of workers per CPU core to run for concurrent uploads. Defaults to `8`. + +* `attempts` - (Optional) The number of attempts to make per page or block when uploading. Defaults to `1`. ## Attributes Reference From f233003ded2bebd5fd52bf151df268b4310cb8f8 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 8 Aug 2016 15:41:45 +1200 Subject: [PATCH 0587/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8621fb422..2cbea0069 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ IMPROVEMENTS * provider/aws: Change the way ARNs are built [GH-7151] * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` [GH-7891] + * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] From a5f3deb7ec629bf887b772ef00bf94d3a2cb261a Mon Sep 17 00:00:00 2001 From: Linda Xu Date: Sun, 7 Aug 2016 23:43:47 -0700 Subject: [PATCH 0588/1238] Aurora Enhanced monitoring support --- .../aws/resource_aws_rds_cluster_instance.go | 40 ++++++++++ .../resource_aws_rds_cluster_instance_test.go | 80 +++++++++++++++++++ .../aws/r/rds_cluster_instance.html.markdown | 4 + 3 files changed, 124 insertions(+) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 745674c43..83b959a02 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -97,6 +97,18 @@ func resourceAwsRDSClusterInstance() *schema.Resource { ForceNew: true, }, + "monitoring_role_arn": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + + "monitoring_interval": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "tags": tagsSchema(), }, } @@ -128,6 +140,14 @@ func resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ createOpts.DBSubnetGroupName = aws.String(attr.(string)) } + if attr, ok := d.GetOk("monitoring_role_arn"); ok { + createOpts.MonitoringRoleArn = aws.String(attr.(string)) + } + + if attr, ok := d.GetOk("monitoring_interval"); ok { + createOpts.MonitoringInterval = aws.Int64(int64(attr.(int))) + } + log.Printf("[DEBUG] Creating RDS DB Instance opts: %s", createOpts) resp, err := conn.CreateDBInstance(createOpts) if err != 
nil { @@ -207,6 +227,14 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("identifier", db.DBInstanceIdentifier) d.Set("storage_encrypted", db.StorageEncrypted) + if db.MonitoringInterval != nil { + d.Set("monitoring_interval", db.MonitoringInterval) + } + + if db.MonitoringRoleArn != nil { + d.Set("monitoring_role_arn", db.MonitoringRoleArn) + } + if len(db.DBParameterGroups) > 0 { d.Set("db_parameter_group_name", db.DBParameterGroups[0].DBParameterGroupName) } @@ -245,6 +273,18 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ } + if d.HasChange("monitoring_role_arn") { + d.SetPartial("monitoring_role_arn") + req.MonitoringRoleArn = aws.String(d.Get("monitoring_role_arn").(string)) + requestUpdate = true + } + + if d.HasChange("monitoring_interval") { + d.SetPartial("monitoring_interval") + req.MonitoringInterval = aws.Int64(int64(d.Get("monitoring_interval").(int))) + requestUpdate = true + } + log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate) if requestUpdate { log.Printf("[DEBUG] DB Instance Modification request: %#v", req) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index 81e79f488..74a87c1d3 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -187,6 +187,25 @@ func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource. } } +func testAccAWSCluster_with_InstanceEnhancedMonitor(t *testing.T) { + var v rds.DBInstance + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSClusterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSClusterInstanceEnhancedMonitor(acctest.RandInt()), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSClusterInstanceExists("aws_rds_cluster_instance.cluster_instances", &v), + testAccCheckAWSDBClusterInstanceAttributes(&v), + ), + }, + }, + }) +} + // Add some random to the name, to avoid collision func testAccAWSClusterInstanceConfig(n int) string { return fmt.Sprintf(` @@ -281,3 +300,64 @@ resource "aws_db_parameter_group" "bar" { } `, n, n, n, n) } + +func testAccAWSClusterInstanceEnhancedMonitor(n int) string { + return fmt.Sprintf(` +resource "aws_rds_cluster" "default" { + cluster_identifier = "tf-aurora-cluster-test-%d" + availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"] + database_name = "mydb" + master_username = "foo" + master_password = "mustbeeightcharaters" +} + +resource "aws_rds_cluster_instance" "cluster_instances" { + identifier = "tf-cluster-instance-%d" + cluster_identifier = "${aws_rds_cluster.default.id}" + instance_class = "db.r3.large" + db_parameter_group_name = "${aws_db_parameter_group.bar.name}" + monitoring_interval = "0" + monitoring_role_arn = "${aws_iam_role.tf_enhanced_monitor_role}" +} + +resource "aws_iam_role" "tf_enhanced_monitor_role" { + name = "tf_enhanced_monitor_role-%d" + assume_role_policy = < Date: Mon, 8 Aug 2016 19:05:54 +1200 Subject: [PATCH 0589/1238] provider/aws: `aws_s3_bucket` acceleration_status not available in china (#7999) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit or us-gov Fixes #7969 `acceleration_status` is not available in China or US-Gov data centers. 
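The guard the provider needs is roughly the following. This is a minimal, self-contained sketch assuming aws-sdk-go's `aws`, `awserr`, and `s3` packages; the function name and the way the status is returned are illustrative, not the provider's actual code:

```go
package example

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/service/s3"
)

// readAccelerationStatus reads the bucket's transfer acceleration status and
// returns "" instead of an error when the region (e.g. cn-north-1 or
// us-gov-west-1) rejects the call with UnsupportedArgument.
func readAccelerationStatus(conn *s3.S3, bucket string) (string, error) {
	out, err := conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{
		Bucket: aws.String(bucket),
	})
	if err != nil {
		if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "UnsupportedArgument" {
			log.Printf("[WARN] S3 Transfer Acceleration is not supported for bucket %q in this region", bucket)
			return "", nil
		}
		return "", err
	}
	return aws.StringValue(out.Status), nil
}
```

Treating only this specific error code as non-fatal keeps genuine failures (permissions, throttling) visible while still allowing reads to succeed in partitions that lack the feature.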
Even querying for this will give the following: ``` Error refreshing state: 1 error(s) occurred: 2016/08/04 13:58:52 [DEBUG] plugin: waiting for all plugin processes to complete... * aws_s3_bucket.registry_cn: UnsupportedArgument: The request contained * an unsupported argument. status code: 400, request id: F74BA6AA0985B103 ``` We are going to stop any Read calls for acceleration status from these data centers ``` % make testacc TEST=./builtin/providers/aws % TESTARGS='-run=TestAccAWSS3Bucket_' ✹ ==> Checking that code complies with gofmt requirements... go generate $(go list ./... | grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSS3Bucket_ -timeout 120m === RUN TestAccAWSS3Bucket_Notification --- PASS: TestAccAWSS3Bucket_Notification (409.46s) === RUN TestAccAWSS3Bucket_NotificationWithoutFilter --- PASS: TestAccAWSS3Bucket_NotificationWithoutFilter (166.84s) === RUN TestAccAWSS3Bucket_basic --- PASS: TestAccAWSS3Bucket_basic (133.48s) === RUN TestAccAWSS3Bucket_acceleration --- PASS: TestAccAWSS3Bucket_acceleration (282.06s) === RUN TestAccAWSS3Bucket_Policy --- PASS: TestAccAWSS3Bucket_Policy (332.14s) === RUN TestAccAWSS3Bucket_UpdateAcl --- PASS: TestAccAWSS3Bucket_UpdateAcl (225.96s) === RUN TestAccAWSS3Bucket_Website_Simple --- PASS: TestAccAWSS3Bucket_Website_Simple (358.15s) === RUN TestAccAWSS3Bucket_WebsiteRedirect --- PASS: TestAccAWSS3Bucket_WebsiteRedirect (380.38s) === RUN TestAccAWSS3Bucket_WebsiteRoutingRules --- PASS: TestAccAWSS3Bucket_WebsiteRoutingRules (258.29s) === RUN TestAccAWSS3Bucket_shouldFailNotFound --- PASS: TestAccAWSS3Bucket_shouldFailNotFound (92.24s) === RUN TestAccAWSS3Bucket_Versioning --- PASS: TestAccAWSS3Bucket_Versioning (654.19s) === RUN TestAccAWSS3Bucket_Cors --- PASS: TestAccAWSS3Bucket_Cors (143.58s) === RUN TestAccAWSS3Bucket_Logging --- PASS: TestAccAWSS3Bucket_Logging (249.79s) === RUN TestAccAWSS3Bucket_Lifecycle --- PASS: TestAccAWSS3Bucket_Lifecycle (259.87s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 3946.464s ``` thanks to @kwilczynski and @radeksimko for the research on how to handle the generic errors here Running these over a 4G tethering connection has been painful :) --- builtin/providers/aws/resource_aws_s3_bucket.go | 12 ++++++++++-- .../docs/providers/aws/r/s3_bucket.html.markdown | 2 ++ 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 29ac708f5..840b00d09 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -556,9 +556,17 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { }) log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) if err != nil { - return err + // Amazon S3 Transfer Acceleration might not be supported in the + // given region, for example, China (Beijing) and the Government + // Cloud does not support this feature at the moment. 
+ if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" { + return err + } + log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Accelaration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region) + } else { + log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) + d.Set("acceleration_status", accelerate.Status) } - d.Set("acceleration_status", accelerate.Status) // Read the logging configuration logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ diff --git a/website/source/docs/providers/aws/r/s3_bucket.html.markdown b/website/source/docs/providers/aws/r/s3_bucket.html.markdown index c8445a5b0..821a48192 100644 --- a/website/source/docs/providers/aws/r/s3_bucket.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket.html.markdown @@ -174,6 +174,8 @@ The following arguments are supported: * `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below). * `acceleration_status` - (Optional) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. +~> **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1` + The `website` object supports the following: * `index_document` - (Required, unless using `redirect_all_requests_to`) Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders. From 107e935c97e2c2c6f90cf293dce276911daf9b2d Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Aug 2016 08:07:10 +0100 Subject: [PATCH 0590/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2cbea0069..12951164e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ IMPROVEMENTS BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation [GH-7997] + * provider/aws: Prevent errors when `aws_s3_bucket` `acceleration_status` is not available in a given region [GH-7999] * provider/aws: Add state filter to `aws_availability_zone`s data source [GH-7965] * provider/aws: Handle lack of snapshot ID for a volume in `ami_copy` [GH-7995] * provider/aws: Retry association of IAM Role & instance profile [GH-7938] From a6d79802804c6d96c3592c08de311b30d0fe9462 Mon Sep 17 00:00:00 2001 From: Linda Xu Date: Mon, 8 Aug 2016 01:15:22 -0700 Subject: [PATCH 0591/1238] fix testAccAWSClusterInstanceEnhancedMonitor function --- .../aws/resource_aws_rds_cluster_instance_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index 74a87c1d3..212fab1e5 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -187,7 +187,7 @@ func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource. 
} } -func testAccAWSCluster_with_InstanceEnhancedMonitor(t *testing.T) { +func TestAccAWSCluster_withInstanceEnhancedMonitor(t *testing.T) { var v rds.DBInstance resource.Test(t, resource.TestCase{ @@ -316,14 +316,14 @@ resource "aws_rds_cluster_instance" "cluster_instances" { cluster_identifier = "${aws_rds_cluster.default.id}" instance_class = "db.r3.large" db_parameter_group_name = "${aws_db_parameter_group.bar.name}" - monitoring_interval = "0" - monitoring_role_arn = "${aws_iam_role.tf_enhanced_monitor_role}" + monitoring_interval = "60" + monitoring_role_arn = "${aws_iam_role.tf_enhanced_monitor_role.arn}" } resource "aws_iam_role" "tf_enhanced_monitor_role" { name = "tf_enhanced_monitor_role-%d" assume_role_policy = < Date: Mon, 8 Aug 2016 18:16:30 +0200 Subject: [PATCH 0592/1238] Updated API GW integration response documentation ### Explanation for this change Recently, I've been using Terraform to manage AWS API GWs with Lambda backends. It appears that an explicit dependency is required. Not setting it would lead to this error: ``` [...] Error creating API Gateway Integration Response: NotFoundException: No integration defined for method ``` Thus, I found the thread below which exposes the problem too. Relevant Terraform version: checked against 0.6.16 Thread issue: https://github.com/hashicorp/terraform/issues/6128 --- .../aws/r/api_gateway_integration_response.html.markdown | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown index 1db32a92a..8e13aeabd 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown @@ -10,6 +10,9 @@ description: |- Provides an HTTP Method Integration Response for an API Gateway Resource. +-> **Note:** Depends on having `aws_api_gateway_integration` inside your rest api. To ensure this +you might need to add an explicit `depends_on` for clean runs. + ## Example Usage ``` From 4398304b9328d8b6517fdb342837b439dc7fbbb9 Mon Sep 17 00:00:00 2001 From: Seth Vargo Date: Mon, 8 Aug 2016 12:42:46 -0400 Subject: [PATCH 0593/1238] Update links to serf --- website/source/intro/hashicorp-ecosystem.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/intro/hashicorp-ecosystem.html.markdown b/website/source/intro/hashicorp-ecosystem.html.markdown index a1ac56644..66390245c 100644 --- a/website/source/intro/hashicorp-ecosystem.html.markdown +++ b/website/source/intro/hashicorp-ecosystem.html.markdown @@ -25,6 +25,6 @@ Below are summaries of HashiCorp’s open source projects and a graphic showing [Consul](https://www.consul.io/?utm_source=terraform&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for service discovery, service registry, and health checks. In the Atlas workflow Consul is configured at the Packer build stage and identifies the service(s) contained in each artifact. Since Consul is configured at the build phase with Packer, when the artifact is deployed with Terraform, it is fully configured with dependencies and service discovery pre-baked. This greatly reduces the risk of an unhealthy node in production due to configuration failure at runtime. -[Serf](https://www.serfdom.io/?utm_source=terraform&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. 
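Circling back to the API Gateway note in patch 0592 above: the documentation recommends an explicit `depends_on` but stops short of showing one. A hypothetical configuration, written as a Go raw-string constant in the style of this repository's acceptance-test fixtures (all resource names here are invented for illustration), might look like:

```go
// exampleIntegrationResponseConfig shows the explicit depends_on that the
// api_gateway_integration_response documentation recommends. The referenced
// rest API, resource, method, and integration are assumed to be defined
// elsewhere in the same configuration.
const exampleIntegrationResponseConfig = `
resource "aws_api_gateway_integration_response" "example" {
  rest_api_id = "${aws_api_gateway_rest_api.example.id}"
  resource_id = "${aws_api_gateway_resource.example.id}"
  http_method = "${aws_api_gateway_method.example.http_method}"
  status_code = "200"

  # Without this, Terraform may try to create the integration response
  # before the integration exists and fail with NotFoundException.
  depends_on = ["aws_api_gateway_integration.example"]
}
`
```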
Consul uses Serf’s gossip protocol as the foundation for service discovery. +[Serf](https://www.serf.io/?utm_source=terraform&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for cluster membership and failure detection. Consul uses Serf’s gossip protocol as the foundation for service discovery. [Vagrant](https://www.vagrantup.com/?utm_source=terraform&utm_campaign=HashicorpEcosystem) is a HashiCorp tool for managing development environments that mirror production. Vagrant environments reduce the friction of developing a project and reduce the risk of unexpected behavior appearing after deployment. Vagrant boxes can be built in parallel with production artifacts with Packer to maintain parity between development and production. From f8ee778c3a417e8efba5cd6e8a9fb5c9a426b9c8 Mon Sep 17 00:00:00 2001 From: Dan Allegood Date: Mon, 8 Aug 2016 12:59:32 -0700 Subject: [PATCH 0594/1238] Adding disk type of Thick Lazy (#7916) --- .../vsphere/resource_vsphere_virtual_disk.go | 16 +++- .../resource_vsphere_virtual_machine.go | 20 ++++- .../resource_vsphere_virtual_machine_test.go | 79 +++++++++++++++---- .../vsphere/r/virtual_machine.html.markdown | 2 +- 4 files changed, 95 insertions(+), 22 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_disk.go b/builtin/providers/vsphere/resource_vsphere_virtual_disk.go index 5505e17cd..6ae14e983 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_disk.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_disk.go @@ -49,9 +49,9 @@ func resourceVSphereVirtualDisk() *schema.Resource { Default: "eagerZeroedThick", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "thin" && value != "eagerZeroedThick" { + if value != "thin" && value != "eagerZeroedThick" && value != "lazy" { errors = append(errors, fmt.Errorf( - "only 'thin' and 'eagerZeroedThick' are supported values for 'type'")) + "only 'thin', 'eagerZeroedThick', and 'lazy' are supported values for 'type'")) } return }, @@ -234,11 +234,21 @@ func resourceVSphereVirtualDiskDelete(d *schema.ResourceData, meta interface{}) // createHardDisk creates a new Hard Disk. 
func createHardDisk(client *govmomi.Client, size int, diskPath string, diskType string, adapterType string, dc string) error { + var vDiskType string + switch diskType { + case "thin": + vDiskType = "thin" + case "eagerZeroedThick": + vDiskType = "eagerZeroedThick" + case "lazy": + vDiskType = "preallocated" + } + virtualDiskManager := object.NewVirtualDiskManager(client.Client) spec := &types.FileBackedVirtualDiskSpec{ VirtualDiskSpec: types.VirtualDiskSpec{ AdapterType: adapterType, - DiskType: diskType, + DiskType: vDiskType, }, CapacityKb: int64(1024 * 1024 * size), } diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index d8e907212..3de90d502 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -380,9 +380,9 @@ func resourceVSphereVirtualMachine() *schema.Resource { Default: "eager_zeroed", ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "thin" && value != "eager_zeroed" { + if value != "thin" && value != "eager_zeroed" && value != "lazy" { errors = append(errors, fmt.Errorf( - "only 'thin' and 'eager_zeroed' are supported values for 'type'")) + "only 'thin', 'eager_zeroed', and 'lazy' are supported values for 'type'")) } return }, @@ -580,8 +580,15 @@ func resourceVSphereVirtualMachineUpdate(d *schema.ResourceData, meta interface{ return fmt.Errorf("[ERROR] resourceVSphereVirtualMachineUpdate - Neither vmdk path nor vmdk name was given") } + var initType string + if disk["type"] != "" { + initType = disk["type"].(string) + } else { + initType = "thin" + } + log.Printf("[INFO] Attaching disk: %v", diskPath) - err = addHardDisk(vm, size, iops, "thin", datastore, diskPath, controller_type) + err = addHardDisk(vm, size, iops, initType, datastore, diskPath, controller_type) if err != nil { log.Printf("[ERROR] Add Hard Disk Failed: %v", err) return err @@ -1298,6 +1305,10 @@ func addHardDisk(vm *object.VirtualMachine, size, iops int64, diskType string, d // eager zeroed thick virtual disk backing.ThinProvisioned = types.NewBool(false) backing.EagerlyScrub = types.NewBool(true) + } else if diskType == "lazy" { + // lazy zeroed thick virtual disk + backing.ThinProvisioned = types.NewBool(false) + backing.EagerlyScrub = types.NewBool(false) } else if diskType == "thin" { // thin provisioned virtual disk backing.ThinProvisioned = types.NewBool(true) @@ -1477,6 +1488,7 @@ func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *obje } isThin := initType == "thin" + eagerScrub := initType == "eager_zeroed" rpr := rp.Reference() dsr := ds.Reference() return types.VirtualMachineRelocateSpec{ @@ -1489,7 +1501,7 @@ func buildVMRelocateSpec(rp *object.ResourcePool, ds *object.Datastore, vm *obje DiskBackingInfo: &types.VirtualDiskFlatVer2BackingInfo{ DiskMode: "persistent", ThinProvisioned: types.NewBool(isThin), - EagerlyScrub: types.NewBool(!isThin), + EagerlyScrub: types.NewBool(eagerScrub), }, DiskId: key, }, diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index 5a1dafe24..a8c4602ab 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -398,36 +398,36 @@ func TestAccVSphereVirtualMachine_diskSCSICapacity(t *testing.T) { }) } -const 
testAccCheckVSphereVirtualMachineConfig_initType = ` -resource "vsphere_virtual_machine" "thin" { +const testAccCheckVSphereVirtualMachineConfig_initTypeEager = ` +resource "vsphere_virtual_machine" "thickEagerZero" { name = "terraform-test" ` + testAccTemplateBasicBody + ` disk { - size = 1 - iops = 500 - controller_type = "scsi" - name = "one" + size = 1 + iops = 500 + controller_type = "scsi" + name = "one" } disk { - size = 1 - controller_type = "ide" - type = "eager_zeroed" - name = "two" + size = 1 + controller_type = "ide" + type = "eager_zeroed" + name = "two" } } ` -func TestAccVSphereVirtualMachine_diskInitType(t *testing.T) { +func TestAccVSphereVirtualMachine_diskInitTypeEager(t *testing.T) { var vm virtualMachine basic_vars := setupTemplateBasicBodyVars() - config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_initType) + config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_initTypeEager) - vmName := "vsphere_virtual_machine.thin" + vmName := "vsphere_virtual_machine.thickEagerZero" test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "3"}.testCheckFuncBasic() - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_initType) + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_initTypeEager) log.Printf("[DEBUG] template config= %s", config) resource.Test(t, resource.TestCase{ @@ -449,6 +449,57 @@ func TestAccVSphereVirtualMachine_diskInitType(t *testing.T) { }) } +const testAccCheckVSphereVirtualMachineConfig_initTypeLazy = ` +resource "vsphere_virtual_machine" "lazy" { + name = "terraform-test" +` + testAccTemplateBasicBody + ` + disk { + size = 1 + iops = 500 + controller_type = "scsi" + name = "one" + } + disk { + size = 1 + controller_type = "ide" + type = "lazy" + name = "two" + } +} +` + +func TestAccVSphereVirtualMachine_diskInitTypeLazy(t *testing.T) { + var vm virtualMachine + basic_vars := setupTemplateBasicBodyVars() + config := basic_vars.testSprintfTemplateBody(testAccCheckVSphereVirtualMachineConfig_initTypeLazy) + + vmName := "vsphere_virtual_machine.lazy" + + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + TestFuncData{vm: vm, label: basic_vars.label, vmName: vmName, numDisks: "3"}.testCheckFuncBasic() + + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_initTypeLazy) + log.Printf("[DEBUG] template config= %s", config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + // FIXME dynmically calculate the hashes + resource.TestCheckResourceAttr(vmName, "disk.692719290.type", "lazy"), + resource.TestCheckResourceAttr(vmName, "disk.692719290.controller_type", "ide"), + resource.TestCheckResourceAttr(vmName, "disk.531766495.controller_type", "scsi"), + ), + }, + }, + }) +} + const testAccCheckVSphereVirtualMachineConfig_dhcp = ` resource "vsphere_virtual_machine" "bar" { name = "terraform-test" diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown 
b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown index 5015ac6c4..1ad81742a 100644 --- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown +++ b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown @@ -117,7 +117,7 @@ The `disk` block supports: * `size` - (Required if template and bootable_vmdks_path not provided) Size of this disk (in GB). * `name` - (Required if size is provided when creating a new disk) This "name" is used for the disk file name in vSphere, when the new disk is created. * `iops` - (Optional) Number of virtual iops to allocate for this disk. -* `type` - (Optional) 'eager_zeroed' (the default), or 'thin' are supported options. +* `type` - (Optional) 'eager_zeroed' (the default), 'lazy', or 'thin' are supported options. * `vmdk` - (Required if template and size not provided) Path to a vmdk in a vSphere datastore. * `bootable` - (Optional) Set to 'true' if a vmdk was given and it should attempt to boot after creation. * `controller_type` = (Optional) Controller type to attach the disk to. 'scsi' (the default), or 'ide' are supported options. From 012090f2a3c47969af685eb294b84d3fd8c2ca48 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 9 Aug 2016 08:00:41 +1200 Subject: [PATCH 0595/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 12951164e..0b5ebe669 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ IMPROVEMENTS * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] + * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 58a01f2fd518b6d7c11c47088f6ec7f36b9fc734 Mon Sep 17 00:00:00 2001 From: Kevin Crawley Date: Mon, 8 Aug 2016 15:23:21 -0500 Subject: [PATCH 0596/1238] #8051 :: bumped aws_rds_cluster timeout to 40 minutes (#8052) --- builtin/providers/aws/resource_aws_rds_cluster.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster.go b/builtin/providers/aws/resource_aws_rds_cluster.go index 348018000..ccb6328a6 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster.go +++ b/builtin/providers/aws/resource_aws_rds_cluster.go @@ -373,7 +373,7 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error Pending: []string{"creating", "backing-up", "modifying"}, Target: []string{"available"}, Refresh: resourceAwsRDSClusterStateRefreshFunc(d, meta), - Timeout: 15 * time.Minute, + Timeout: 40 * time.Minute, MinTimeout: 3 * time.Second, } From 40980f58952ec9b9e4cd46749ddeed4f53c10b60 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 8 Aug 2016 21:24:36 +0100 Subject: [PATCH 0597/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b5ebe669..49ae9df19 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -26,6 +26,7 @@ BUG FIXES: * provider/aws: Retry association of IAM Role & instance profile [GH-7938] * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings [GH-7777] + * providers/aws: 
`aws_rds_cluster` creation timeout bumped to 40 minutes [GH-8052] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From 92d1cfb890522bcfdb92f911d6f36981150e1863 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Sat, 6 Aug 2016 14:15:13 -0700 Subject: [PATCH 0598/1238] deps: Update github.com/aws/aws-sdk-go/... Update to v1.3.1 tag --- vendor/github.com/aws/aws-sdk-go/Gemfile | 6 + vendor/github.com/aws/aws-sdk-go/Makefile | 152 ++++++ vendor/github.com/aws/aws-sdk-go/README.md | 116 ++++ .../github.com/aws/aws-sdk-go/aws/config.go | 9 +- .../aws/credentials/static_provider.go | 11 +- .../stscreds/assume_role_provider.go | 161 ++++++ .../aws/aws-sdk-go/aws/defaults/defaults.go | 6 +- .../aws/aws-sdk-go/aws/session/doc.go | 229 ++++++++ .../aws/aws-sdk-go/aws/session/env_config.go | 188 +++++++ .../aws/aws-sdk-go/aws/session/session.go | 313 +++++++++-- .../aws-sdk-go/aws/session/shared_config.go | 294 +++++++++++ .../aws/aws-sdk-go/aws/signer/v4/v4.go | 2 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- vendor/github.com/aws/aws-sdk-go/sdk.go | 7 + .../aws-sdk-go/service/cloudwatchlogs/api.go | 140 +++-- .../service/cloudwatchlogs/service.go | 8 +- .../service/directoryservice/api.go | 3 +- .../aws/aws-sdk-go/service/emr/api.go | 155 ++++-- .../aws/aws-sdk-go/service/lambda/api.go | 111 ++-- .../aws/aws-sdk-go/service/rds/api.go | 390 +++++++++++++- vendor/vendor.json | 495 ++++++------------ 21 files changed, 2245 insertions(+), 553 deletions(-) create mode 100644 vendor/github.com/aws/aws-sdk-go/Gemfile create mode 100644 vendor/github.com/aws/aws-sdk-go/Makefile create mode 100644 vendor/github.com/aws/aws-sdk-go/README.md create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/sdk.go diff --git a/vendor/github.com/aws/aws-sdk-go/Gemfile b/vendor/github.com/aws/aws-sdk-go/Gemfile new file mode 100644 index 000000000..2fb295a1a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Gemfile @@ -0,0 +1,6 @@ +source 'https://rubygems.org' + +gem 'yard', git: 'git://github.com/lsegal/yard', ref: '5025564a491e1b7c6192632cba2802202ca08449' +gem 'yard-go', git: 'git://github.com/jasdel/yard-go', ref: 'e78e1ef7cdf5e0f3266845b26bb4fd64f1dd6f85' +gem 'rdiscount' + diff --git a/vendor/github.com/aws/aws-sdk-go/Makefile b/vendor/github.com/aws/aws-sdk-go/Makefile new file mode 100644 index 000000000..e4b5a4791 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/Makefile @@ -0,0 +1,152 @@ +LINTIGNOREDOT='awstesting/integration.+should not use dot imports' +LINTIGNOREDOC='service/[^/]+/(api|service|waiters)\.go:.+(comment on exported|should have comment or be unexported)' +LINTIGNORECONST='service/[^/]+/(api|service|waiters)\.go:.+(type|struct field|const|func) ([^ ]+) should be ([^ ]+)' +LINTIGNORESTUTTER='service/[^/]+/(api|service)\.go:.+(and that stutters)' +LINTIGNOREINFLECT='service/[^/]+/(api|service)\.go:.+method .+ should be ' +LINTIGNOREINFLECTS3UPLOAD='service/s3/s3manager/upload\.go:.+struct field SSEKMSKeyId should be ' +LINTIGNOREDEPS='vendor/.+\.go' + +SDK_WITH_VENDOR_PKGS=$(shell go list ./... | grep -v "/vendor/src") +SDK_ONLY_PKGS=$(shell go list ./... 
| grep -v "/vendor/") +SDK_GO_1_4=$(shell go version | grep "go1.4") +SDK_GO_VERSION=$(shell go version | awk '''{print $$3}''' | tr -d '''\n''') + +all: get-deps generate unit + +help: + @echo "Please use \`make ' where is one of" + @echo " api_info to print a list of services and versions" + @echo " docs to build SDK documentation" + @echo " build to go build the SDK" + @echo " unit to run unit tests" + @echo " integration to run integration tests" + @echo " performance to run performance tests" + @echo " verify to verify tests" + @echo " lint to lint the SDK" + @echo " vet to vet the SDK" + @echo " generate to go generate and make services" + @echo " gen-test to generate protocol tests" + @echo " gen-services to generate services" + @echo " get-deps to go get the SDK dependencies" + @echo " get-deps-tests to get the SDK's test dependencies" + @echo " get-deps-verify to get the SDK's verification dependencies" + +generate: gen-test gen-endpoints gen-services + +gen-test: gen-protocol-test + +gen-services: + go generate ./service + +gen-protocol-test: + go generate ./private/protocol/... + +gen-endpoints: + go generate ./private/endpoints + +build: + @echo "go build SDK and vendor packages" + @go build ${SDK_ONLY_PKGS} + +unit: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -tags $(SDK_ONLY_PKGS) + +unit-with-race-cover: get-deps-tests build verify + @echo "go test SDK and vendor packages" + @go test -tags -race -cpu=1,2,4 $(SDK_ONLY_PKGS) + +integration: get-deps-tests integ-custom smoke-tests performance + +integ-custom: + go test -tags "integration" ./awstesting/integration/customizations/... + +smoke-tests: get-deps-tests + gucumber -go-tags "integration" ./awstesting/integration/smoke + +performance: get-deps-tests + AWS_TESTING_LOG_RESULTS=${log-detailed} AWS_TESTING_REGION=$(region) AWS_TESTING_DB_TABLE=$(table) gucumber -go-tags "integration" ./awstesting/performance + +sandbox-tests: sandbox-test-go14 sandbox-test-go15 sandbox-test-go15-novendorexp sandbox-test-go16 sandbox-test-go17 sandbox-test-gotip + +sandbox-test-go14: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.4 -t "aws-sdk-go-1.4" . + docker run -t aws-sdk-go-1.4 + +sandbox-test-go15: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5 -t "aws-sdk-go-1.5" . + docker run -t aws-sdk-go-1.5 + +sandbox-test-go15-novendorexp: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.5-novendorexp -t "aws-sdk-go-1.5-novendorexp" . + docker run -t aws-sdk-go-1.5-novendorexp + +sandbox-test-go16: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.6 -t "aws-sdk-go-1.6" . + docker run -t aws-sdk-go-1.6 + +sandbox-test-go17: + docker build -f ./awstesting/sandbox/Dockerfile.test.go1.7 -t "aws-sdk-go-1.7" . + docker run -t aws-sdk-go-1.7 + +sandbox-test-gotip: + @echo "Run make update-aws-golang-tip, if this test fails because missing aws-golang:tip container" + docker build -f ./awstesting/sandbox/Dockerfile.test.gotip -t "aws-sdk-go-tip" . + docker run -t aws-sdk-go-tip + +update-aws-golang-tip: + docker build -f ./awstesting/sandbox/Dockerfile.golang-tip -t "aws-golang:tip" . 
+ +verify: get-deps-verify lint vet + +lint: + @echo "go lint SDK and vendor packages" + @lint=`if [ -z "${SDK_GO_1_4}" ]; then golint ./...; else echo "skipping golint"; fi`; \ + lint=`echo "$$lint" | grep -E -v -e ${LINTIGNOREDOT} -e ${LINTIGNOREDOC} -e ${LINTIGNORECONST} -e ${LINTIGNORESTUTTER} -e ${LINTIGNOREINFLECT} -e ${LINTIGNOREDEPS} -e ${LINTIGNOREINFLECTS3UPLOAD}`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ] && [ "$$lint" != "skipping golint" ]; then exit 1; fi + +SDK_BASE_FOLDERS=$(shell ls -d */ | grep -v vendor | grep -v awsmigrate) +ifneq (,$(findstring go1.5, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow +else ifneq (,$(findstring go1.6, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -example=false +else ifneq (,$(findstring devel, ${SDK_GO_VERSION})) + GO_VET_CMD=go tool vet --all -shadow -tests=false +else + GO_VET_CMD=echo skipping go vet, ${SDK_GO_VERSION} +endif + +vet: + ${GO_VET_CMD} ${SDK_BASE_FOLDERS} + +get-deps: get-deps-tests get-deps-verify + @echo "go get SDK dependencies" + @go get -v $(SDK_ONLY_PKGS) + +get-deps-tests: + @echo "go get SDK testing dependencies" + go get github.com/gucumber/gucumber/cmd/gucumber + go get github.com/stretchr/testify + go get github.com/smartystreets/goconvey + +get-deps-verify: + @echo "go get SDK verification utilities" + @if [ -z "${SDK_GO_1_4}" ]; then go get github.com/golang/lint/golint; else echo "skipped getting golint"; fi + +bench: + @echo "go bench SDK packages" + @go test -run NONE -bench . -benchmem -tags 'bench' $(SDK_ONLY_PKGS) + +bench-protocol: + @echo "go bench SDK protocol marshallers" + @go test -run NONE -bench . -benchmem -tags 'bench' ./private/protocol/... + +docs: + @echo "generate SDK docs" + rm -rf doc && bundle install && bundle exec yard + @# This env variable, DOCS, is for internal use + @if [ -n "$(AWS_DOC_GEN_TOOL)" ]; then echo "For internal use. Subject to change."; $(AWS_DOC_GEN_TOOL) `pwd`; fi + +api_info: + @go run private/model/cli/api-info/api-info.go diff --git a/vendor/github.com/aws/aws-sdk-go/README.md b/vendor/github.com/aws/aws-sdk-go/README.md new file mode 100644 index 000000000..b4d302e31 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/README.md @@ -0,0 +1,116 @@ +# AWS SDK for Go + + +[![API Reference](http://img.shields.io/badge/api-reference-blue.svg)](http://docs.aws.amazon.com/sdk-for-go/api) +[![Join the chat at https://gitter.im/aws/aws-sdk-go](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/aws/aws-sdk-go?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://img.shields.io/travis/aws/aws-sdk-go.svg)](https://travis-ci.org/aws/aws-sdk-go) +[![Apache V2 License](http://img.shields.io/badge/license-Apache%20V2-blue.svg)](https://github.com/aws/aws-sdk-go/blob/master/LICENSE.txt) + + +aws-sdk-go is the official AWS SDK for the Go programming language. + +Checkout our [release notes](https://github.com/aws/aws-sdk-go/releases) for information about the latest bug fixes, updates, and features added to the SDK. + +## Installing + +If you are using Go 1.5 with the `GO15VENDOREXPERIMENT=1` vendoring flag, or 1.6 and higher you can use the following command to retrieve the SDK. The SDK's non-testing dependencies will be included and are vendored in the `vendor` folder. 
+ + go get -u github.com/aws/aws-sdk-go + +Otherwise if your Go environment does not have vendoring support enabled, or you do not want to include the vendored SDK's dependencies you can use the following command to retrieve the SDK and its non-testing dependencies using `go get`. + + go get -u github.com/aws/aws-sdk-go/aws/... + go get -u github.com/aws/aws-sdk-go/service/... + +If you're looking to retrieve just the SDK without any dependencies use the following command. + + go get -d github.com/aws/aws-sdk-go/ + +These two processes will still include the `vendor` folder and it should be deleted if its not going to be used by your environment. + + rm -rf $GOPATH/src/github.com/aws/aws-sdk-go/vendor + +## Reference Documentation +[`Getting Started Guide`](https://aws.amazon.com/sdk-for-go/) - This document is a general introduction how to configure and make requests with the SDK. If this is your first time using the SDK, this documentation and the API documentation will help you get started. This document focuses on the syntax and behavior of the SDK. The [Service Developer Guide](https://aws.amazon.com/documentation/) will help you get started using specific AWS services. + +[`SDK API Reference Documentation`](https://docs.aws.amazon.com/sdk-for-go/api/) - Use this document to look up all API operation input and output parameters for AWS services supported by the SDK. The API reference also includes documentation of the SDK, and examples how to using the SDK, service client API operations, and API operation require parameters. + +[`Service Developer Guide`](https://aws.amazon.com/documentation/) - Use this documentation to learn how to interface with an AWS service. These are great guides both, if you're getting started with a service, or looking for more information on a service. You should not need this document for coding, though in some cases, services may supply helpful samples that you might want to look out for. + +[`SDK Examples`](https://github.com/aws/aws-sdk-go/tree/master/example) - Included in the SDK's repo are a several hand crafted examples using the SDK features and AWS services. + +## Configuring Credentials + +Before using the SDK, ensure that you've configured credentials. The best +way to configure credentials on a development machine is to use the +`~/.aws/credentials` file, which might look like: + +``` +[default] +aws_access_key_id = AKID1234567890 +aws_secret_access_key = MY-SECRET-KEY +``` + +You can learn more about the credentials file from this +[blog post](http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs). + +Alternatively, you can set the following environment variables: + +``` +AWS_ACCESS_KEY_ID=AKID1234567890 +AWS_SECRET_ACCESS_KEY=MY-SECRET-KEY +``` + +### AWS shared config file (`~/.aws/config`) +The AWS SDK for Go added support the shared config file in release [v1.3.0](https://github.com/aws/aws-sdk-go/releases/tag/v1.3.0). You can opt into enabling support for the shared config by setting the environment variable `AWS_SDK_LOAD_CONFIG` to a truthy value. See the [Session](https://github.com/aws/aws-sdk-go/wiki/sessions) wiki for more information about this feature. + +## Using the Go SDK + +To use a service in the SDK, create a service variable by calling the `New()` +function. Once you have a service client, you can call API operations which each +return response data and a possible error. 
+ +To list a set of instance IDs from EC2, you could run: + +```go +package main + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/ec2" +) + +func main() { + // Create an EC2 service object in the "us-west-2" region + // Note that you can also configure your region globally by + // exporting the AWS_REGION environment variable + svc := ec2.New(session.New(), &aws.Config{Region: aws.String("us-west-2")}) + + // Call the DescribeInstances Operation + resp, err := svc.DescribeInstances(nil) + if err != nil { + panic(err) + } + + // resp has all of the response data, pull out instance IDs: + fmt.Println("> Number of reservation sets: ", len(resp.Reservations)) + for idx, res := range resp.Reservations { + fmt.Println(" > Number of instances: ", len(res.Instances)) + for _, inst := range resp.Reservations[idx].Instances { + fmt.Println(" - Instance ID: ", *inst.InstanceId) + } + } +} +``` + +You can find more information and operations in our +[API documentation](http://docs.aws.amazon.com/sdk-for-go/api/). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE.txt and NOTICE.txt for more information. diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index d3e889514..16647c808 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -130,11 +130,12 @@ type Config struct { // client to create a new http.Client. This options is only meaningful if you're not // already using a custom HTTP client with the SDK. Enabled by default. // - // Must be set and provided to the session.New() in order to disable the EC2Metadata - // overriding the timeout for default credentials chain. + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. // // Example: - // sess := session.New(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true)) + // // svc := s3.New(sess) // EC2MetadataDisableTimeoutOverride *bool @@ -150,7 +151,7 @@ type Config struct { // NewConfig returns a new Config pointer that can be chained with builder methods to // set multiple configuration values inline without using pointers. // -// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// sess, err := session.NewSession(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) // func NewConfig() *Config { return &Config{} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 6f075604e..4f5dab3fc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -30,13 +30,22 @@ func NewStaticCredentials(id, secret, token string) *Credentials { }}) } +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. 
Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + // Retrieve returns the credentials or error if the credentials are invalid. func (s *StaticProvider) Retrieve() (Value, error) { if s.AccessKeyID == "" || s.SecretAccessKey == "" { return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty } - s.Value.ProviderName = StaticProviderName + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } return s.Value, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 000000000..30c847ae2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,161 @@ +// Package stscreds are credential Providers to retrieve STS AWS credentials. +// +// STS provides multiple ways to retrieve credentials which can be used when making +// future AWS service API operation calls. +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. This provider must be used explicitly, +// as it is not included in the credentials chain. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). 
If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + TokenCode *string + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfiede by the STS client. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. + p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil && p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } + roleOutput, err := p.Client.AssumeRole(input) + + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. 
+ p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 570417ffa..dccbafbfc 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -90,12 +90,14 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti Providers: []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - remoteCredProvider(*cfg, handlers), + RemoteCredProvider(*cfg, handlers), }, }) } -func remoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { +// RemoteCredProvider returns a credenitials provider for the default remote +// endpoints such as EC2 or ECS Roles. +func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI") if len(ecsCredURI) > 0 { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 000000000..4ad78d7f3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,229 @@ +/* +Package session provides configuration for the SDK's service clients. + +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. + +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment, and config files each time +the Session is created. Sharing the Session value across all of your service +clients will ensure the configuration is loaded the fewest number of times possible. + +Concurrency + +Sessions are safe to use concurrently as long as the Session is not being +modified. The SDK will not modify the Session once the Session has been created. +Creating service clients concurrently from a shared Session is safe. + +Sessions from Shared Config + +Sessions can be created using the method above that will only load the +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. +Alternatively you can explicitly create a Session with shared config enabled. +To do this you can use NewSessionWithOptions to configure how the Session will +be created. Using the NewSessionWithOptions with SharedConfigState set to +SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG +environment variable was set. + +Creating Sessions + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. 
See the section Sessions from Shared Config for +more information. + +Create a Session with the default config and request handlers. With credentials +region, and profile loaded from the environment and shared config automatically. +Requires the AWS_PROFILE to be set, or "default" is used. + + // Create Session + sess, err := session.NewSession() + + // Create a Session with a custom region + sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")}) + + // Create a S3 client instance from a session + sess, err := session.NewSession() + if err != nil { + // Handle Session creation error + } + svc := s3.New(sess) + +Create Session With Option Overrides + +In addition to NewSession, Sessions can be created using NewSessionWithOptions. +This func allows you to control and override how the Session will be created +through code instead of being driven by environment variables only. + +Use NewSessionWithOptions when you want to provide the config profile, or +override the shared config state (AWS_SDK_LOAD_CONFIG). + + // Equivalent to session.New + sess, err := session.NewSessionWithOptions(session.Options{}) + + // Specify profile to load for the session's config + sess, err := session.NewSessionWithOptions(session.Options{ + Profile: "profile_name", + }) + + // Specify profile for config and region for requests + sess, err := session.NewSessionWithOptions(session.Options{ + Config: aws.Config{Region: aws.String("us-east-1")}, + Profile: "profile_name", + }) + + // Force enable Shared Config support + sess, err := session.NewSessionWithOptions(session.Options{ + SharedConfigState: SharedConfigEnable, + }) + +Deprecated "New" function + +The New session function has been deprecated because it does not provide good +way to return errors that occur when loading the configuration files and values. +Because of this, the NewWithError + +Adding Handlers + +You can add handlers to a session for processing HTTP requests. All service +clients that use the session inherit the handlers. For example, the following +handler logs every request and its payload made by a service client: + + // Create a session, and add additional handlers for all service + // clients created with the Session to inherit. Adds logging handler. + sess, err := session.NewSession() + sess.Handlers.Send.PushFront(func(r *request.Request) { + // Log every request made and its payload + logger.Println("Request: %s/%s, Payload: %s", + r.ClientInfo.ServiceName, r.Operation, r.Params) + }) + +Deprecated "New" function + +The New session function has been deprecated because it does not provide good +way to return errors that occur when loading the configuration files and values. +Because of this, NewSession was created so errors can be retrieved when +creating a session fails. + +Shared Config Fields + +By default the SDK will only load the shared credentials file's (~/.aws/credentials) +credentials values, and all other config is provided by the environment variables, +SDK defaults, and user provided aws.Config values. + +If the AWS_SDK_LOAD_CONFIG environment variable is set, or SharedConfigEnable +option is used to create the Session the full shared config values will be +loaded. This includes credentials, region, and support for assume role. In +addition the Session will load its configuration from both the shared config +file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both +files have the same format. + +If both config files are present the configuration from both files will be +read. 
The Session will be created from configuration values from the shared +credentials file (~/.aws/credentials) over those in the shared credentials +file (~/.aws/config). + +Credentials are the values the SDK should use for authenticating requests with +AWS Services. They arfrom a configuration file will need to include both +aws_access_key_id and aws_secret_access_key must be provided together in the +same file to be considered valid. The values will be ignored if not a complete +group. aws_session_token is an optional field that can be provided if both of +the other two fields are also provided. + + aws_access_key_id = AKID + aws_secret_access_key = SECRET + aws_session_token = TOKEN + +Assume Role values allow you to configure the SDK to assume an IAM role using +a set of credentials provided in a config file via the source_profile field. +Both "role_arn" and "source_profile" are required. The SDK does not support +assuming a role with MFA token Via the Session's constructor. You can use the +stscreds.AssumeRoleProvider credentials provider to specify custom +configuration and support for MFA. + + role_arn = arn:aws:iam:::role/ + source_profile = profile_with_creds + external_id = 1234 + mfa_serial = not supported! + role_session_name = session_name + +Region is the region the SDK should use for looking up AWS service endpoints +and signing requests. + + region = us-east-1 + +Environment Variables + +When a Session is created several environment variables can be set to adjust +how the SDK functions, and what configuration data it loads when creating +Sessions. All environment values are optional, but some values like credentials +require multiple of the values to set or the partial values will be ignored. +All environment variable values are strings unless otherwise noted. + +Environment configuration values. If set both Access Key ID and Secret Access +Key must be provided. Session Token and optionally also be provided, but is +not required. + + # Access Key ID + AWS_ACCESS_KEY_ID=AKID + AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + + # Secret Access Key + AWS_SECRET_ACCESS_KEY=SECRET + AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + + # Session Token + AWS_SESSION_TOKEN=TOKEN + +Region value will instruct the SDK where to make service API requests to. If is +not provided in the environment the region must be provided before a service +client request is made. + + AWS_REGION=us-east-1 + + # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_REGION is not also set. + AWS_DEFAULT_REGION=us-east-1 + +Profile name the SDK should load use when loading shared config from the +configuration files. If not provided "default" will be used as the profile name. + + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. 
If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + + +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 000000000..d2f0c8448 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,188 @@ +package session + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. 
+ // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. +func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 6bc8f1be9..1abe39a3a 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,17 +1,11 @@ -// Package session provides a way to create service clients with shared configuration -// and handlers. -// -// Generally this package should be used instead of the `defaults` package. 
-//
-// A session should be used to share configurations and request handlers between multiple
-// service clients. When service clients need specific configuration aws.Config can be
-// used to provide additional configuration directly to the service client.
 package session
 
 import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/client"
 	"github.com/aws/aws-sdk-go/aws/corehandlers"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
 	"github.com/aws/aws-sdk-go/aws/defaults"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/private/endpoints"
@@ -21,36 +15,199 @@ import (
 // store configurations and request handlers for those services.
 //
 // Sessions are safe to create service clients concurrently, but it is not safe
-// to mutate the session concurrently.
+// to mutate the Session concurrently.
+//
+// The Session satisfies the service client's client.ConfigProvider.
 type Session struct {
 	Config   *aws.Config
 	Handlers request.Handlers
 }
 
-// New creates a new instance of the handlers merging in the provided Configs
-// on top of the SDK's default configurations. Once the session is created it
-// can be mutated to modify Configs or Handlers. The session is safe to be read
-// concurrently, but it should not be written to concurrently.
+// New creates a new instance of the handlers merging in the provided configs
+// on top of the SDK's default configurations. Once the Session is created it
+// can be mutated to modify the Config or Handlers. The Session is safe to be
+// read concurrently, but it should not be written to concurrently.
 //
-// Example:
-//	// Create a session with the default config and request handlers.
-//	sess := session.New()
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value,
+// the New method can now encounter an error when loading the configuration.
+// When the environment variable is set, and an error occurs, New will return a
+// session that will fail all requests reporting the error that occurred while
+// loading the session. Use NewSession to get the error when creating the
+// session.
 //
-//	// Create a session with a custom region
-//	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value
+// the shared config file (~/.aws/config) will also be loaded, in addition to
+// the shared credentials file (~/.aws/credentials). Values set in both the
+// shared config and shared credentials will be taken from the shared
+// credentials file.
 //
-//	// Create a session, and add additional handlers for all service
-//	// clients created with the session to inherit. Adds logging handler.
-//	sess := session.New()
-//	sess.Handlers.Send.PushFront(func(r *request.Request) {
-//		// Log every request made and its payload
-//		logger.Println("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+// Deprecated: Use NewSession functions to create sessions instead. NewSession
+// has the same functionality as New except an error can be returned when the
+// func is called instead of waiting to receive an error until a request is made.
+func New(cfgs ...*aws.Config) *Session {
+	// load initial config from environment
+	envCfg := loadEnvConfig()
+
+	if envCfg.EnableSharedConfig {
+		s, err := newSession(envCfg, cfgs...)
+		if err != nil {
+			// Old session.New expected all errors to be discovered when
+			// a request is made, and would report the errors then.
This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occuring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + return s + } + + return oldNewSession(cfgs...) +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created. Such as specifing the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + envCfg := loadEnvConfig() + + return newSession(envCfg, cfgs...) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevent. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. 
By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/config). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess, err := session.NewSessionWithOptions(session.Options{}) +// +// // Specify profile to load for the session's config +// sess, err := session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", // }) // -// // Create a S3 client instance from a session -// sess := session.New() -// svc := s3.New(sess) -func New(cfgs ...*aws.Config) *Session { +// // Specify profile for config and region for requests +// sess, err := session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// }) +// +// // Force enable Shared Config support +// sess, err := session.NewSessionWithOptions(session.Options{ +// SharedConfigState: SharedConfigEnable, +// }) +func NewSessionWithOptions(opts Options) (*Session, error) { + envCfg := loadEnvConfig() + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + return newSession(envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func oldNewSession(cfgs ...*aws.Config) *Session { cfg := defaults.Config() handlers := defaults.Handlers() @@ -72,6 +229,95 @@ func New(cfgs ...*aws.Config) *Session { return s } +func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Order config files will be loaded in with later files overwriting + // previous config file values. + cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). 
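+		// cfgFiles starts as [shared config file, shared credentials file], so
+		// dropping the first entry leaves only the shared credentials file.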
+ cfgFiles = cfgFiles[1:] + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // MFA not supported + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider + cfg.Credentials = credentials.NewCredentials( + defaults.RemoteCredProvider(*cfg, handlers), + ) + } + } +} + func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -80,12 +326,11 @@ func initHandlers(s *Session) { } } -// Copy creates and returns a copy of the current session, coping the config +// Copy creates and returns a copy of the current Session, coping the config // and handlers. If any additional configs are provided they will be merged -// on top of the session's copied config. +// on top of the Session's copied config. // -// Example: -// // Create a copy of the current session, configured for the us-west-2 region. +// // Create a copy of the current Session, configured for the us-west-2 region. // sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) func (s *Session) Copy(cfgs ...*aws.Config) *Session { newSession := &Session{ @@ -101,10 +346,6 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { // ClientConfig satisfies the client.ConfigProvider interface and is used to // configure the service client instances. Passing the Session to the service // client's constructor (New) will use this method to configure the client. -// -// Example: -// sess := session.New() -// s3.New(sess) func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) 
endpoint, signingRegion := endpoints.NormalizeEndpoint( diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 000000000..0147eedeb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,294 @@ +package session + +import ( + "fmt" + "os" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
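+//
+// newSession passes the file names ordered as [shared config file, shared
+// credentials file], so values in ~/.aws/credentials take precedence over
+// values in ~/.aws/config.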
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + if _, err := os.Stat(filename); os.IsNotExist(err) { + // Trim files from the list that don't exist. + continue + } + + f, err := ini.Load(filename) + if err != nil { + return nil, SharedConfigLoadError{Filename: filename} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: f, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + // Multiple level assume role chains are not support + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Trim files from the list that don't exist. + for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore proviles missings + continue + } + return err + } + } + + return nil +} + +// setFromFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. 
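+//
+// For illustration (hypothetical profile name and values), a section such as
+//
+//	[profile assume-prod]
+//	role_arn = arn:aws:iam::123456789012:role/prod-access
+//	source_profile = default
+//	region = us-west-2
+//
+// would populate the AssumeRole and Region fields, and leave Creds empty.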
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, err := file.IniData.GetSection(profile) + if err != nil { + // Fallback to to alternate profile name: profile + section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if err != nil { + return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + } + } + + // Shared Credentials + akid := section.Key(accessKeyIDKey).String() + secret := section.Key(secretAccessKey).String() + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.Key(sessionTokenKey).String(), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.Key(roleArnKey).String() + srcProfile := section.Key(sourceProfileKey).String() + if len(roleArn) > 0 && len(srcProfile) > 0 { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), + } + } + + // Region + if v := section.Key(regionKey).String(); len(v) > 0 { + cfg.Region = v + } + + return nil +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure. 
+func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go index f040f9ce9..7d99f54d1 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -545,7 +545,7 @@ func (ctx *signingCtx) buildBodyDigest() { } else { hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) } - if ctx.ServiceName == "s3" { + if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) } } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index 90df0b23b..b03a27624 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.2.10" +const SDKVersion = "1.3.1" diff --git a/vendor/github.com/aws/aws-sdk-go/sdk.go b/vendor/github.com/aws/aws-sdk-go/sdk.go new file mode 100644 index 000000000..afa465a22 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/sdk.go @@ -0,0 +1,7 @@ +// Package sdk is the official AWS SDK for the Go programming language. +// +// See our Developer Guide for information for on getting started and using +// the SDK. +// +// https://github.com/aws/aws-sdk-go/wiki +package sdk diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go index d2f65e266..991cd6e17 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go @@ -106,13 +106,13 @@ func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) ( // Creates an ExportTask which allows you to efficiently export data from a // Log Group to your Amazon S3 bucket. // -// This is an asynchronous call. If all the required information is provided, +// This is an asynchronous call. If all the required information is provided, // this API will initiate an export task and respond with the task Id. Once // started, DescribeExportTasks can be used to get the status of an export task. // You can only have one active (RUNNING or PENDING) export task at a time, // per account. // -// You can export logs from multiple log groups or multiple time ranges to +// You can export logs from multiple log groups or multiple time ranges to // the same Amazon S3 bucket. To separate out log data for each export task, // you can specify a prefix that will be used as the Amazon S3 key prefix for // all exported objects. @@ -169,9 +169,12 @@ func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req // must be unique within a region for an AWS account. You can create up to 500 // log groups per account. // -// You must use the following guidelines when naming a log group: Log group -// names can be between 1 and 512 characters long. Allowed characters are a-z, -// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). +// You must use the following guidelines when naming a log group: +// +// Log group names can be between 1 and 512 characters long. 
+// +// Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-' (hyphen), +// '/' (forward slash), and '.' (period). func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { req, out := c.CreateLogGroupRequest(input) err := req.Send() @@ -225,9 +228,11 @@ func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (re // stream must be unique within the log group. There is no limit on the number // of log streams that can exist in a log group. // -// You must use the following guidelines when naming a log stream: Log stream -// names can be between 1 and 512 characters long. The ':' colon character is -// not allowed. +// You must use the following guidelines when naming a log stream: +// +// Log stream names can be between 1 and 512 characters long. +// +// The ':' colon character is not allowed. func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { req, out := c.CreateLogStreamRequest(input) err := req.Send() @@ -590,10 +595,10 @@ func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinations // the request. The list returned in the response is ASCII-sorted by destination // name. // -// By default, this operation returns up to 50 destinations. If there are -// more destinations to list, the response would contain a nextToken value in -// the response body. You can also limit the number of destinations returned -// in the response by specifying the limit parameter in the request. +// By default, this operation returns up to 50 destinations. If there are more +// destinations to list, the response would contain a nextToken value in the +// response body. You can also limit the number of destinations returned in +// the response by specifying the limit parameter in the request. func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { req, out := c.DescribeDestinationsRequest(input) err := req.Send() @@ -669,7 +674,7 @@ func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksIn // Returns all the export tasks that are associated with the AWS account making // the request. The export tasks can be filtered based on TaskId or TaskStatus. // -// By default, this operation returns up to 50 export tasks that satisfy the +// By default, this operation returns up to 50 export tasks that satisfy the // specified filters. If there are more export tasks to list, the response would // contain a nextToken value in the response body. You can also limit the number // of export tasks returned in the response by specifying the limit parameter @@ -731,7 +736,7 @@ func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) // the request. The list returned in the response is ASCII-sorted by log group // name. // -// By default, this operation returns up to 50 log groups. If there are more +// By default, this operation returns up to 50 log groups. If there are more // log groups to list, the response would contain a nextToken value in the response // body. You can also limit the number of log groups returned in the response // by specifying the limit parameter in the request. @@ -816,7 +821,7 @@ func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInpu // Returns all the log streams that are associated with the specified log group. // The list returned in the response is ASCII-sorted by log stream name. 
// -// By default, this operation returns up to 50 log streams. If there are more +// By default, this operation returns up to 50 log streams. If there are more // log streams to list, the response would contain a nextToken value in the // response body. You can also limit the number of log streams returned in the // response by specifying the limit parameter in the request. This operation @@ -903,7 +908,7 @@ func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFilte // Returns all the metrics filters associated with the specified log group. // The list returned in the response is ASCII-sorted by filter name. // -// By default, this operation returns up to 50 metric filters. If there are +// By default, this operation returns up to 50 metric filters. If there are // more metric filters to list, the response would contain a nextToken value // in the response body. You can also limit the number of metric filters returned // in the response by specifying the limit parameter in the request. @@ -988,7 +993,7 @@ func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubsc // Returns all the subscription filters associated with the specified log group. // The list returned in the response is ASCII-sorted by filter name. // -// By default, this operation returns up to 50 subscription filters. If there +// By default, this operation returns up to 50 subscription filters. If there // are more subscription filters to list, the response would contain a nextToken // value in the response body. You can also limit the number of subscription // filters returned in the response by specifying the limit parameter in the @@ -1076,14 +1081,14 @@ func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (re // the event timestamp. You can limit the streams searched to an explicit list // of logStreamNames. // -// By default, this operation returns as much matching log events as can fit +// By default, this operation returns as much matching log events as can fit // in a response size of 1MB, up to 10,000 log events, or all the events found // within a time-bounded scan window. If the response includes a nextToken, // then there is more data to search, and the search can be resumed with a new // request providing the nextToken. The response will contain a list of searchedLogStreams // that contains information about which streams were searched in the request // and whether they have been searched completely or require further pagination. -// The limit parameter in the request. can be used to specify the maximum number +// The limit parameter in the request can be used to specify the maximum number // of events to return in a page. func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { req, out := c.FilterLogEventsRequest(input) @@ -1166,7 +1171,7 @@ func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *req // Retrieves log events from the specified log stream. You can provide an optional // time range to filter the results on the event timestamp. // -// By default, this operation returns as much log events as can fit in a response +// By default, this operation returns as much log events as can fit in a response // size of 1MB, up to 10,000 log events. The response will always include a // nextForwardToken and a nextBackwardToken in the response body. 
You can use // any of these tokens in subsequent GetLogEvents requests to paginate through @@ -1251,7 +1256,7 @@ func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req // Currently, the only supported physical resource is a Amazon Kinesis stream // belonging to the same account as the destination. // -// A destination controls what is written to its Amazon Kinesis stream through +// A destination controls what is written to its Amazon Kinesis stream through // an access policy. By default, PutDestination does not set any access policy // with the destination, which means a cross-account user will not be able to // call PutSubscriptionFilter against this destination. To enable that, the @@ -1358,19 +1363,28 @@ func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *req // Uploads a batch of log events to the specified log stream. // -// Every PutLogEvents request must include the sequenceToken obtained from +// Every PutLogEvents request must include the sequenceToken obtained from // the response of the previous request. An upload in a newly created log stream -// does not require a sequenceToken. +// does not require a sequenceToken. You can also get the sequenceToken using +// DescribeLogStreams. // -// The batch of events must satisfy the following constraints: The maximum -// batch size is 1,048,576 bytes, and this size is calculated as the sum of -// all event messages in UTF-8, plus 26 bytes for each log event. None of the -// log events in the batch can be more than 2 hours in the future. None of the -// log events in the batch can be older than 14 days or the retention period -// of the log group. The log events in the batch must be in chronological ordered -// by their timestamp. The maximum number of log events in a batch is 10,000. -// A batch of log events in a single PutLogEvents request cannot span more than -// 24 hours. Otherwise, the PutLogEvents operation will fail. +// The batch of events must satisfy the following constraints: +// +// The maximum batch size is 1,048,576 bytes, and this size is calculated +// as the sum of all event messages in UTF-8, plus 26 bytes for each log event. +// +// None of the log events in the batch can be more than 2 hours in the future. +// +// None of the log events in the batch can be older than 14 days or the retention +// period of the log group. +// +// The log events in the batch must be in chronological ordered by their +// timestamp. +// +// The maximum number of log events in a batch is 10,000. +// +// A batch of log events in a single PutLogEvents request cannot span more +// than 24 hours. Otherwise, the PutLogEvents operation will fail. func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { req, out := c.PutLogEventsRequest(input) err := req.Send() @@ -1424,8 +1438,8 @@ func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (re // group. Metric filters allow you to configure rules to extract metric data // from log events ingested through PutLogEvents requests. // -// The maximum number of metric filters that can be associated with a log -// group is 100. +// The maximum number of metric filters that can be associated with a log group +// is 100. 
func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { req, out := c.PutMetricFilterRequest(input) err := req.Send() @@ -1530,17 +1544,22 @@ func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilt // Creates or updates a subscription filter and associates it with the specified // log group. Subscription filters allow you to subscribe to a real-time stream // of log events ingested through PutLogEvents requests and have them delivered -// to a specific destination. Currently, the supported destinations are: An -// Amazon Kinesis stream belonging to the same account as the subscription filter, -// for same-account delivery. A logical destination (used via an ARN of Destination) -// belonging to a different account, for cross-account delivery. An Amazon -// Kinesis Firehose stream belonging to the same account as the subscription -// filter, for same-account delivery. An AWS Lambda function belonging to -// the same account as the subscription filter, for same-account delivery. +// to a specific destination. Currently, the supported destinations are: // +// An Amazon Kinesis stream belonging to the same account as the subscription +// filter, for same-account delivery. // -// Currently there can only be one subscription filter associated with a log -// group. +// A logical destination (used via an ARN of Destination) belonging to a +// different account, for cross-account delivery. +// +// An Amazon Kinesis Firehose stream belonging to the same account as the +// subscription filter, for same-account delivery. +// +// An AWS Lambda function belonging to the same account as the subscription +// filter, for same-account delivery. +// +// Currently there can only be one subscription filter associated with a +// log group. func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { req, out := c.PutSubscriptionFilterRequest(input) err := req.Send() @@ -1649,7 +1668,7 @@ type CreateExportTaskInput struct { // Name of Amazon S3 bucket to which the log data will be exported. // - // Note: Only buckets in the same AWS region are supported. + // Note: Only buckets in the same AWS region are supported. Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` // Prefix that will be used as the start of Amazon S3 key for every object exported. @@ -3121,17 +3140,18 @@ func (s MetricFilterMatchRecord) GoString() string { type MetricTransformation struct { _ struct{} `type:"structure"` - // The name of the CloudWatch metric to which the monitored log information - // should be published. For example, you may publish to a metric called ErrorCount. + // (Optional) A default value to emit when a filter pattern does not match a + // log event. Can be null. + DefaultValue *float64 `locationName:"defaultValue" type:"double"` + + // Name of the metric. MetricName *string `locationName:"metricName" type:"string" required:"true"` - // The destination namespace of the new CloudWatch metric. + // Namespace to which the metric belongs. MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` - // What to publish to the metric. For example, if you're counting the occurrences - // of a particular term like "Error", the value will be "1" for each occurrence. - // If you're counting the bytes transferred the published value will be the - // value in the log event. 
+ // A string representing a value to publish to this metric when a filter pattern + // matches a log event. MetricValue *string `locationName:"metricValue" type:"string" required:"true"` } @@ -3195,7 +3215,7 @@ type PutDestinationInput struct { DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` // The ARN of an IAM role that grants CloudWatch Logs permissions to do Amazon - // Kinesis PutRecord requests on the desitnation stream. + // Kinesis PutRecord requests on the destination stream. RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` // The ARN of an Amazon Kinesis stream to deliver matching log events to. @@ -3544,11 +3564,17 @@ type PutSubscriptionFilterInput struct { _ struct{} `type:"structure"` // The ARN of the destination to deliver matching log events to. Currently, - // the supported destinations are: An Amazon Kinesis stream belonging to the - // same account as the subscription filter, for same-account delivery. A logical - // destination (used via an ARN of Destination) belonging to a different account, - // for cross-account delivery. An Amazon Kinesis Firehose stream belonging - // to the same account as the subscription filter, for same-account delivery. + // the supported destinations are: + // + // An Amazon Kinesis stream belonging to the same account as the subscription + // filter, for same-account delivery. + // + // A logical destination (used via an ARN of Destination) belonging to a + // different account, for cross-account delivery. + // + // An Amazon Kinesis Firehose stream belonging to the same account as the + // subscription filter, for same-account delivery. + // // An AWS Lambda function belonging to the same account as the subscription // filter, for same-account delivery. DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go index 064110304..c769589cf 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go @@ -19,7 +19,7 @@ import ( // // You can use CloudWatch Logs to: // -// Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch +// Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch // Logs to monitor applications and systems using log data. For example, CloudWatch // Logs can track the number of errors that occur in your application logs and // send you a notification whenever the rate of errors exceeds a threshold you @@ -30,12 +30,12 @@ import ( // codes in an Apache access log). When the term you are searching for is found, // CloudWatch Logs reports the data to a Amazon CloudWatch metric that you specify. // -// Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon +// Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon // CloudWatch and receive notifications of particular API activity as captured // by CloudTrail and use the notification to perform troubleshooting. // -// Archive Log Data: You can use CloudWatch Logs to store your log data in -// highly durable storage. You can change the log retention setting so that +// Archive Log Data: You can use CloudWatch Logs to store your log data +// in highly durable storage. 
You can change the log retention setting so that // any log events older than this setting are automatically deleted. The CloudWatch // Logs agent makes it easy to quickly send both rotated and non-rotated log // data off of a host and into the log service. You can then access the raw diff --git a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go index 05f2efed4..8289531fa 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/directoryservice/api.go @@ -3979,8 +3979,7 @@ type RadiusSettings struct { // The amount of time, in seconds, to wait for the RADIUS server to respond. RadiusTimeout *int64 `min:"1" type:"integer"` - // The shared secret code that was specified when your RADIUS endpoints were - // created. + // Not currently used. SharedSecret *string `min:"8" type:"string"` // Not currently used. diff --git a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go index bacf653be..a54b7eb6b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/emr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/emr/api.go @@ -113,18 +113,18 @@ func (c *EMR) AddJobFlowStepsRequest(input *AddJobFlowStepsInput) (req *request. // on how to do this, go to Add More than 256 Steps to a Job Flow (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/AddMoreThan256Steps.html) // in the Amazon Elastic MapReduce Developer's Guide. // -// A step specifies the location of a JAR file stored either on the master +// A step specifies the location of a JAR file stored either on the master // node of the job flow or in Amazon S3. Each step is performed by the main // function of the main class of the JAR file. The main class can be specified // either in the manifest of the JAR or by using the MainFunction parameter // of the step. // -// Elastic MapReduce executes each step in the order listed. For a step to +// Elastic MapReduce executes each step in the order listed. For a step to // be considered complete, the main function must exit with a zero exit code // and all Hadoop jobs started while the step was running must have completed // and run successfully. // -// You can only add steps to a job flow that is in one of the following states: +// You can only add steps to a job flow that is in one of the following states: // STARTING, BOOTSTRAPPING, RUNNING, or WAITING. func (c *EMR) AddJobFlowSteps(input *AddJobFlowStepsInput) (*AddJobFlowStepsOutput, error) { req, out := c.AddJobFlowStepsRequest(input) @@ -280,20 +280,22 @@ func (c *EMR) DescribeJobFlowsRequest(input *DescribeJobFlowsInput) (req *reques // ListClusters, DescribeCluster, ListSteps, ListInstanceGroups and ListBootstrapActions // instead. // -// DescribeJobFlows returns a list of job flows that match all of the supplied +// DescribeJobFlows returns a list of job flows that match all of the supplied // parameters. The parameters can include a list of job flow IDs, job flow states, // and restrictions on job flow creation date and time. // -// Regardless of supplied parameters, only job flows created within the last +// Regardless of supplied parameters, only job flows created within the last // two months are returned. 
// -// If no parameters are supplied, then job flows matching either of the following +// If no parameters are supplied, then job flows matching either of the following // criteria are returned: // -// Job flows created and completed in the last two weeks Job flows created -// within the last two months that are in one of the following states: RUNNING, -// WAITING, SHUTTING_DOWN, STARTING Amazon Elastic MapReduce can return a -// maximum of 512 job flow descriptions. +// Job flows created and completed in the last two weeks +// +// Job flows created within the last two months that are in one of the following +// states: RUNNING, WAITING, SHUTTING_DOWN, STARTING +// +// Amazon Elastic MapReduce can return a maximum of 512 job flow descriptions. func (c *EMR) DescribeJobFlows(input *DescribeJobFlowsInput) (*DescribeJobFlowsOutput, error) { req, out := c.DescribeJobFlowsRequest(input) err := req.Send() @@ -977,7 +979,7 @@ func (c *EMR) SetTerminationProtectionRequest(input *SetTerminationProtectionInp // is analogous to calling the Amazon EC2 DisableAPITermination API on all of // the EC2 instances in a cluster. // -// SetTerminationProtection is used to prevent accidental termination of a +// SetTerminationProtection is used to prevent accidental termination of a // job flow and to ensure that in the event of an error, the instances will // persist so you can recover any data stored in their ephemeral instance storage. // @@ -1096,7 +1098,7 @@ func (c *EMR) TerminateJobFlowsRequest(input *TerminateJobFlowsInput) (req *requ // the job flow is running are stopped. Any log files not already saved are // uploaded to Amazon S3 if a LogUri was specified when the job flow was created. // -// The maximum number of JobFlows allowed is 10. The call to TerminateJobFlows +// The maximum number of JobFlows allowed is 10. The call to TerminateJobFlows // is asynchronous. Depending on the configuration of the job flow, it may take // up to 5-20 minutes for the job flow to completely terminate and release allocated // resources, such as Amazon EC2 instances. @@ -1304,12 +1306,16 @@ func (s AddTagsOutput) GoString() string { // Flow on the MapR Distribution for Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html). // Currently supported values are: // -// "mapr-m3" - launch the job flow using MapR M3 Edition. "mapr-m5" - launch -// the job flow using MapR M5 Edition. "mapr" with the user arguments specifying -// "--edition,m3" or "--edition,m5" - launch the job flow using MapR M3 or M5 -// Edition, respectively. In Amazon EMR releases 4.0 and greater, the only -// accepted parameter is the application name. To pass arguments to applications, -// you supply a configuration for each application. +// "mapr-m3" - launch the job flow using MapR M3 Edition. +// +// "mapr-m5" - launch the job flow using MapR M5 Edition. +// +// "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" +// - launch the job flow using MapR M3 or M5 Edition, respectively. +// +// In Amazon EMR releases 4.0 and greater, the only accepted parameter is +// the application name. To pass arguments to applications, you supply a configuration +// for each application. type Application struct { _ struct{} `type:"structure"` @@ -1409,7 +1415,7 @@ type Cluster struct { // Amazon EMR releases 4.x or later. // - // The list of Configurations supplied to the EMR cluster. + // The list of Configurations supplied to the EMR cluster. 
Configurations []*Configuration `type:"list"` // Provides information about the EC2 instances in a cluster grouped by category. @@ -1608,7 +1614,7 @@ func (s Command) GoString() string { // Amazon EMR releases 4.x or later. // -// Specifies a hardware and software configuration of the EMR cluster. This +// Specifies a hardware and software configuration of the EMR cluster. This // includes configurations for applications and software bundled with Amazon // EMR. The Configuration object is a JSON object which is defined by a classification // and a set of properties. Configurations can be nested, so a configuration @@ -1933,7 +1939,7 @@ type Ec2InstanceAttributes struct { // not specify this value, the job flow is launched in the normal AWS cloud, // outside of a VPC. // - // Amazon VPC currently does not support cluster compute quadruple extra large + // Amazon VPC currently does not support cluster compute quadruple extra large // (cc1.4xlarge) instances. Thus, you cannot specify the cc1.4xlarge instance // type for nodes of a job flow launched in a VPC. Ec2SubnetId *string `type:"string"` @@ -1963,6 +1969,36 @@ func (s Ec2InstanceAttributes) GoString() string { return s.String() } +// The details of the step failure. The service attempts to detect the root +// cause for many common failures. +type FailureDetails struct { + _ struct{} `type:"structure"` + + // The path to the log file where the step failure root cause was originally + // recorded. + LogFile *string `type:"string"` + + // The descriptive message including the error the EMR service has identified + // as the cause of step failure. This is text from an error log that describes + // the root cause of the failure. + Message *string `type:"string"` + + // The reason for the step failure. In the case where the service cannot successfully + // determine the root cause of the failure, it returns "Unknown Error" as a + // reason. + Reason *string `type:"string"` +} + +// String returns the string representation +func (s FailureDetails) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FailureDetails) GoString() string { + return s.String() +} + // A job flow step consisting of a JAR file whose main function will be executed. // The main function submits a job for Hadoop to execute and waits for the job // to finish or fail. @@ -2093,15 +2129,15 @@ type InstanceGroup struct { // Amazon EMR releases 4.x or later. // - // The list of configurations supplied for an EMR cluster instance group. You - // can specify a separate configuration for each instance group (master, core, - // and task). + // The list of configurations supplied for an EMR cluster instance group. + // You can specify a separate configuration for each instance group (master, + // core, and task). Configurations []*Configuration `type:"list"` // The EBS block devices that are mapped to this instance group. EbsBlockDevices []*EbsBlockDevice `type:"list"` - // If the instance group is EBS-optimized. An Amazon EBS–optimized instance + // If the instance group is EBS-optimized. An Amazon EBS-optimized instance // uses an optimized configuration stack and provides additional, dedicated // capacity for Amazon EBS I/O. EbsOptimized *bool `type:"boolean"` @@ -2155,9 +2191,9 @@ type InstanceGroupConfig struct { // Amazon EMR releases 4.x or later. // - // The list of configurations supplied for an EMR cluster instance group. 
You - // can specify a separate configuration for each instance group (master, core, - // and task). + // The list of configurations supplied for an EMR cluster instance group. + // You can specify a separate configuration for each instance group (master, + // core, and task). Configurations []*Configuration `type:"list"` // EBS configurations that will be attached to each Amazon EC2 instance in the @@ -2600,7 +2636,7 @@ type JobFlowInstancesConfig struct { // the job flow to launch. If you do not specify this value, the job flow is // launched in the normal Amazon Web Services cloud, outside of an Amazon VPC. // - // Amazon VPC currently does not support cluster compute quadruple extra large + // Amazon VPC currently does not support cluster compute quadruple extra large // (cc1.4xlarge) instances. Thus you cannot specify the cc1.4xlarge instance // type for nodes of a job flow launched in a Amazon VPC. Ec2SubnetId *string `type:"string"` @@ -3207,13 +3243,15 @@ type RunJobFlowInput struct { // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, // use ReleaseLabel. // - // The version of the Amazon Machine Image (AMI) to use when launching Amazon + // The version of the Amazon Machine Image (AMI) to use when launching Amazon // EC2 instances in the job flow. The following values are valid: // - // The version number of the AMI to use, for example, "2.0." If the AMI supports - // multiple versions of Hadoop (for example, AMI 1.0 supports both Hadoop 0.18 - // and 0.20) you can use the JobFlowInstancesConfig HadoopVersion parameter - // to modify the version of Hadoop from the defaults shown above. + // The version number of the AMI to use, for example, "2.0." + // + // If the AMI supports multiple versions of Hadoop (for example, AMI 1.0 + // supports both Hadoop 0.18 and 0.20) you can use the JobFlowInstancesConfig + // HadoopVersion parameter to modify the version of Hadoop from the defaults + // shown above. // // For details about the AMI versions currently supported by Amazon Elastic // MapReduce, go to AMI Versions Supported in Elastic MapReduce (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/EnvironmentConfig_AMIVersion.html#ami-versions-supported) @@ -3222,7 +3260,7 @@ type RunJobFlowInput struct { // Amazon EMR releases 4.x or later. // - // A list of applications for the cluster. Valid values are: "Hadoop", "Hive", + // A list of applications for the cluster. Valid values are: "Hadoop", "Hive", // "Mahout", "Pig", and "Spark." They are case insensitive. Applications []*Application `type:"list"` @@ -3232,7 +3270,7 @@ type RunJobFlowInput struct { // Amazon EMR releases 4.x or later. // - // The list of configurations supplied for the EMR cluster you are creating. + // The list of configurations supplied for the EMR cluster you are creating. Configurations []*Configuration `type:"list"` // A specification of the number and type of Amazon EC2 instances on which to @@ -3255,26 +3293,34 @@ type RunJobFlowInput struct { // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, // use Applications. // - // A list of strings that indicates third-party software to use with the job + // A list of strings that indicates third-party software to use with the job // flow that accepts a user argument list. EMR accepts and forwards the argument // list to the corresponding installation script as bootstrap action arguments. 
// For more information, see Launch a Job Flow on the MapR Distribution for // Hadoop (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-mapr.html). // Currently supported values are: // - // "mapr-m3" - launch the cluster using MapR M3 Edition. "mapr-m5" - launch - // the cluster using MapR M5 Edition. "mapr" with the user arguments specifying - // "--edition,m3" or "--edition,m5" - launch the job flow using MapR M3 or M5 - // Edition respectively. "mapr-m7" - launch the cluster using MapR M7 Edition. - // "hunk" - launch the cluster with the Hunk Big Data Analtics Platform. "hue"- - // launch the cluster with Hue installed. "spark" - launch the cluster with - // Apache Spark installed. "ganglia" - launch the cluster with the Ganglia Monitoring - // System installed. + // "mapr-m3" - launch the cluster using MapR M3 Edition. + // + // "mapr-m5" - launch the cluster using MapR M5 Edition. + // + // "mapr" with the user arguments specifying "--edition,m3" or "--edition,m5" + // - launch the job flow using MapR M3 or M5 Edition respectively. + // + // "mapr-m7" - launch the cluster using MapR M7 Edition. + // + // "hunk" - launch the cluster with the Hunk Big Data Analtics Platform. + // + // "hue"- launch the cluster with Hue installed. + // + // "spark" - launch the cluster with Apache Spark installed. + // + // "ganglia" - launch the cluster with the Ganglia Monitoring System installed. NewSupportedProducts []*SupportedProductConfig `type:"list"` // Amazon EMR releases 4.x or later. // - // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x + // The release label for the Amazon EMR release. For Amazon EMR 3.x and 2.x // AMIs, use amiVersion instead instead of ReleaseLabel. ReleaseLabel *string `type:"string"` @@ -3288,13 +3334,14 @@ type RunJobFlowInput struct { // For Amazon EMR releases 3.x and 2.x. For Amazon EMR releases 4.x and greater, // use Applications. // - // A list of strings that indicates third-party software to use with the job + // A list of strings that indicates third-party software to use with the job // flow. For more information, go to Use Third Party Applications with Amazon // EMR (http://docs.aws.amazon.com/ElasticMapReduce/latest/DeveloperGuide/emr-supported-products.html). // Currently supported values are: // - // "mapr-m3" - launch the job flow using MapR M3 Edition. "mapr-m5" - launch - // the job flow using MapR M5 Edition. + // "mapr-m3" - launch the job flow using MapR M3 Edition. + // + // "mapr-m5" - launch the job flow using MapR M5 Edition. SupportedProducts []*string `type:"list"` // A list of tags to associate with a cluster and propagate to Amazon EC2 instances. @@ -3699,6 +3746,10 @@ func (s StepStateChangeReason) GoString() string { type StepStatus struct { _ struct{} `type:"structure"` + // The details for the step failure including reason, message, and log file + // path where the root cause was identified. + FailureDetails *FailureDetails `type:"structure"` + // The execution state of the cluster step. State *string `type:"string" enum:"StepState"` @@ -3877,7 +3928,7 @@ type VolumeSpecification struct { // The number of I/O operations per second (IOPS) that the volume supports. Iops *int64 `type:"integer"` - // The volume size, in gibibytes (GiB). This can be a number from 1 – 1024. + // The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. // If the volume type is EBS-optimized, the minimum value is 10. 
SizeInGB *int64 `type:"integer" required:"true"` @@ -4035,10 +4086,6 @@ const ( ) // The type of instance. -// -// A small instance -// -// A large instance const ( // @enum JobFlowExecutionState JobFlowExecutionStateStarting = "STARTING" diff --git a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go index 2467d596b..7dda986fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/lambda/api.go @@ -178,19 +178,20 @@ func (c *Lambda) CreateEventSourceMappingRequest(input *CreateEventSourceMapping // This event source mapping is relevant only in the AWS Lambda pull model, // where AWS Lambda invokes the function. For more information, go to AWS Lambda: // How it Works (http://docs.aws.amazon.com/lambda/latest/dg/lambda-introduction.html) -// in the AWS Lambda Developer Guide. You provide mapping information (for -// example, which stream to read from and which Lambda function to invoke) in -// the request body. +// in the AWS Lambda Developer Guide. // -// Each event source, such as an Amazon Kinesis or a DynamoDB stream, can -// be associated with multiple AWS Lambda function. A given Lambda function -// can be associated with multiple AWS event sources. +// You provide mapping information (for example, which stream to read from +// and which Lambda function to invoke) in the request body. // -// If you are using versioning, you can specify a specific function version +// Each event source, such as an Amazon Kinesis or a DynamoDB stream, can be +// associated with multiple AWS Lambda function. A given Lambda function can +// be associated with multiple AWS event sources. +// +// If you are using versioning, you can specify a specific function version // or an alias via the function name parameter. For more information about versioning, // see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). // -// This operation requires permission for the lambda:CreateEventSourceMapping +// This operation requires permission for the lambda:CreateEventSourceMapping // action. func (c *Lambda) CreateEventSourceMapping(input *CreateEventSourceMappingInput) (*EventSourceMappingConfiguration, error) { req, out := c.CreateEventSourceMappingRequest(input) @@ -750,8 +751,9 @@ func (c *Lambda) InvokeRequest(input *InvokeInput) (req *request.Request, output // version by providing function version or alias name that is pointing to the // function version using the Qualifier parameter in the request. If you don't // provide the Qualifier parameter, the $LATEST version of the Lambda function -// is invoked. For information about the versioning feature, see AWS Lambda -// Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). +// is invoked. Invocations occur at least once in response to an event and functions +// must be idempotent to handle this. For information about the versioning feature, +// see AWS Lambda Function Versioning and Aliases (http://docs.aws.amazon.com/lambda/latest/dg/versioning-aliases.html). // // This operation requires permission for the lambda:InvokeFunction action. func (c *Lambda) Invoke(input *InvokeInput) (*InvokeOutput, error) { @@ -804,10 +806,11 @@ func (c *Lambda) InvokeAsyncRequest(input *InvokeAsyncInput) (req *request.Reque return } -// This API is deprecated. We recommend you use Invoke API (see Invoke). Submits -// an invocation request to AWS Lambda. 
Upon receiving the request, Lambda executes -// the specified function asynchronously. To see the logs generated by the Lambda -// function execution, see the CloudWatch Logs console. +// This API is deprecated. We recommend you use Invoke API (see Invoke). +// +// Submits an invocation request to AWS Lambda. Upon receiving the request, +// Lambda executes the specified function asynchronously. To see the logs generated +// by the Lambda function execution, see the CloudWatch Logs console. // // This operation requires permission for the lambda:InvokeFunction action. func (c *Lambda) InvokeAsync(input *InvokeAsyncInput) (*InvokeAsyncOutput, error) { @@ -1444,11 +1447,13 @@ type AddPermissionInput struct { _ struct{} `type:"structure"` // The AWS Lambda action you want to allow in this statement. Each Lambda action - // is a string starting with lambda: followed by the API name (see Operations). - // For example, lambda:CreateFunction. You can use wildcard (lambda:*) to grant - // permission for all AWS Lambda actions. + // is a string starting with lambda: followed by the API name . For example, + // lambda:CreateFunction. You can use wildcard (lambda:*) to grant permission + // for all AWS Lambda actions. Action *string `type:"string" required:"true"` + // A unique token that must be supplied by the principal invoking the function. + // This is currently only used for Alexa Smart Home functions. EventSourceToken *string `type:"string"` // Name of the Lambda function whose resource policy you are updating by adding @@ -1475,32 +1480,32 @@ type AddPermissionInput struct { // the qualifier, then permission applies only when request is made using qualified // function ARN: // - // arn:aws:lambda:aws-region:acct-id:function:function-name:2 + // arn:aws:lambda:aws-region:acct-id:function:function-name:2 // // If you specify an alias name, for example PROD, then the permission is valid // only for requests made using the alias ARN: // - // arn:aws:lambda:aws-region:acct-id:function:function-name:PROD + // arn:aws:lambda:aws-region:acct-id:function:function-name:PROD // // If the qualifier is not specified, the permission is valid only when requests // is made using unqualified function ARN. // - // arn:aws:lambda:aws-region:acct-id:function:function-name + // arn:aws:lambda:aws-region:acct-id:function:function-name Qualifier *string `location:"querystring" locationName:"Qualifier" min:"1" type:"string"` - // The AWS account ID (without a hyphen) of the source owner. For example, if - // the SourceArn identifies a bucket, then this is the bucket owner's account - // ID. You can use this additional condition to ensure the bucket you specify - // is owned by a specific account (it is possible the bucket owner deleted the - // bucket and some other AWS account created the bucket). You can also use this - // condition to specify all sources (that is, you don't specify the SourceArn) - // owned by a specific account. + // This parameter is used for S3 and SES only. The AWS account ID (without a + // hyphen) of the source owner. For example, if the SourceArn identifies a bucket, + // then this is the bucket owner's account ID. You can use this additional condition + // to ensure the bucket you specify is owned by a specific account (it is possible + // the bucket owner deleted the bucket and some other AWS account created the + // bucket). You can also use this condition to specify all sources (that is, + // you don't specify the SourceArn) owned by a specific account. 
SourceAccount *string `type:"string"` // This is optional; however, when granting Amazon S3 permission to invoke your - // function, you should specify this field with the bucket Amazon Resource Name - // (ARN) as its value. This ensures that only events generated from the specified - // bucket can invoke the function. + // function, you should specify this field with the Amazon Resource Name (ARN) + // as its value. This ensures that only events generated from the specified + // source can invoke the function. // // If you add a permission for the Amazon S3 principal without providing the // source ARN, any AWS account that creates a mapping to your function ARN can @@ -1686,7 +1691,7 @@ type CreateEventSourceMappingInput struct { // AWS Lambda also allows you to specify only the function name with the account // ID qualifier (for example, account-id:Thumbnail). // - // Note that the length constraint applies only to the ARN. If you specify + // Note that the length constraint applies only to the ARN. If you specify // only the function name, it is limited to 64 character in length. FunctionName *string `min:"1" type:"string" required:"true"` @@ -1770,6 +1775,9 @@ type CreateFunctionInput struct { Role *string `type:"string" required:"true"` // The runtime environment for the Lambda function you are uploading. + // + // To use the Node.js runtime v4.3, set the value to "nodejs4.3". To use earlier + // runtime (v0.10.42), set the value to "nodejs". Runtime *string `type:"string" required:"true" enum:"Runtime"` // The function execution time at which Lambda should terminate the function. @@ -2053,10 +2061,11 @@ type FunctionCode struct { // The Amazon S3 object (the deployment package) version you want to upload. S3ObjectVersion *string `min:"1" type:"string"` - // A zip file containing your deployment package. If you are using the API directly, - // the zip file must be base64-encoded (if you are using the AWS SDKs or the - // AWS CLI, the SDKs or CLI will do the encoding for you). For more information - // about creating a .zip file, go to Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html) + // The contents of your zip file containing your deployment package. If you + // are using the web API directly, the contents of the zip file must be base64-encoded. + // If you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the + // encoding for you. For more information about creating a .zip file, go to + // Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html) // in the AWS Lambda Developer Guide. // // ZipFile is automatically base64 encoded/decoded by the SDK. @@ -2148,6 +2157,9 @@ type FunctionConfiguration struct { Role *string `type:"string"` // The runtime environment for the Lambda function. + // + // To use the Node.js runtime v4.3, set the value to "nodejs4.3". To use earlier + // runtime (v0.10.42), set the value to "nodejs". Runtime *string `type:"string" enum:"Runtime"` // The function execution time at which Lambda should terminate the function. @@ -2350,7 +2362,7 @@ func (s *GetFunctionInput) Validate() error { return nil } -// This response contains the object for the Lambda function location (see API_FunctionCodeLocation. +// This response contains the object for the Lambda function location (see . 
type GetFunctionOutput struct { _ struct{} `type:"structure"` @@ -2531,7 +2543,7 @@ type InvokeInput struct { // You can set this optional parameter to Tail in the request only if you specify // the InvocationType parameter with value RequestResponse. In this case, AWS // Lambda returns the base64-encoded last 4 KB of log data produced by your - // Lambda function in the x-amz-log-results header. + // Lambda function in the x-amz-log-result header. LogType *string `location:"header" locationName:"X-Amz-Log-Type" type:"string" enum:"LogType"` // JSON that you want to provide to your Lambda function as input. @@ -2694,7 +2706,8 @@ func (s ListAliasesOutput) GoString() string { type ListEventSourceMappingsInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) of the Amazon Kinesis stream. + // The Amazon Resource Name (ARN) of the Amazon Kinesis stream. (This parameter + // is optional.) EventSourceArn *string `location:"querystring" locationName:"EventSourceArn" type:"string"` // The name of the Lambda function. @@ -2745,7 +2758,7 @@ func (s *ListEventSourceMappingsInput) Validate() error { return nil } -// Contains a list of event sources (see API_EventSourceMappingConfiguration) +// Contains a list of event sources (see ) type ListEventSourceMappingsOutput struct { _ struct{} `type:"structure"` @@ -3148,7 +3161,12 @@ type UpdateFunctionCodeInput struct { // The Amazon S3 object (the deployment package) version you want to upload. S3ObjectVersion *string `min:"1" type:"string"` - // Based64-encoded .zip file containing your packaged source code. + // The contents of your zip file containing your deployment package. If you + // are using the web API directly, the contents of the zip file must be base64-encoded. + // If you are using the AWS SDKs or the AWS CLI, the SDKs or CLI will do the + // encoding for you. For more information about creating a .zip file, go to + // Execution Permissions (http://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html#lambda-intro-execution-role.html) + // in the AWS Lambda Developer Guide. // // ZipFile is automatically base64 encoded/decoded by the SDK. ZipFile []byte `type:"blob"` @@ -3221,6 +3239,10 @@ type UpdateFunctionConfigurationInput struct { // it executes your function. Role *string `type:"string"` + // The runtime environment for the Lambda function. + // + // To use the Node.js runtime v4.3, set the value to "nodejs4.3". To use earlier + // runtime (v0.10.42), set the value to "nodejs". Runtime *string `type:"string" enum:"Runtime"` // The function execution time at which AWS Lambda should terminate the function. 
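The Runtime documentation added to UpdateFunctionConfigurationInput in the hunk above implies the upgrade path from the legacy "nodejs" (v0.10.42) runtime to "nodejs4.3". A minimal sketch, assuming default-chain credentials, of how a caller might flip an existing function's runtime with this SDK revision; the function name and region are placeholders, not values from this patch:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/lambda"
)

func main() {
	// Region is a placeholder; credentials come from the default provider chain.
	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
	svc := lambda.New(sess)

	// Move an existing function from the legacy "nodejs" runtime to "nodejs4.3",
	// per the Runtime doc comment added in this revision.
	out, err := svc.UpdateFunctionConfiguration(&lambda.UpdateFunctionConfigurationInput{
		FunctionName: aws.String("my-function"), // placeholder function name
		Runtime:      aws.String("nodejs4.3"),
	})
	if err != nil {
		log.Fatalf("UpdateFunctionConfiguration failed: %v", err)
	}
	fmt.Println("runtime is now", aws.StringValue(out.Runtime))
}

UpdateFunctionConfiguration returns the updated FunctionConfiguration, so the new runtime can be confirmed directly from the response.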
@@ -3348,3 +3370,12 @@ const ( // @enum Runtime RuntimePython27 = "python2.7" ) + +const ( + // @enum ThrottleReason + ThrottleReasonConcurrentInvocationLimitExceeded = "ConcurrentInvocationLimitExceeded" + // @enum ThrottleReason + ThrottleReasonFunctionInvocationRateLimitExceeded = "FunctionInvocationRateLimitExceeded" + // @enum ThrottleReason + ThrottleReasonCallerRateLimitExceeded = "CallerRateLimitExceeded" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go index b62829b19..20d909d6f 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/api.go @@ -4616,6 +4616,57 @@ func (c *RDS) ResetDBParameterGroup(input *ResetDBParameterGroupInput) (*DBParam return out, err } +const opRestoreDBClusterFromS3 = "RestoreDBClusterFromS3" + +// RestoreDBClusterFromS3Request generates a "aws/request.Request" representing the +// client's request for the RestoreDBClusterFromS3 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreDBClusterFromS3 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreDBClusterFromS3Request method. +// req, resp := client.RestoreDBClusterFromS3Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *RDS) RestoreDBClusterFromS3Request(input *RestoreDBClusterFromS3Input) (req *request.Request, output *RestoreDBClusterFromS3Output) { + op := &request.Operation{ + Name: opRestoreDBClusterFromS3, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RestoreDBClusterFromS3Input{} + } + + req = c.newRequest(op, input, output) + output = &RestoreDBClusterFromS3Output{} + req.Data = output + return +} + +// Creates an Amazon Aurora DB cluster from data stored in an Amazon S3 bucket. +// Amazon RDS must be authorized to access the Amazon S3 bucket and the data +// must be created using the Percona XtraBackup utility as described in Migrating +// Data from an External MySQL Database to an Amazon Aurora DB Cluster (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Migrate.html). +func (c *RDS) RestoreDBClusterFromS3(input *RestoreDBClusterFromS3Input) (*RestoreDBClusterFromS3Output, error) { + req, out := c.RestoreDBClusterFromS3Request(input) + err := req.Send() + return out, err +} + const opRestoreDBClusterFromSnapshot = "RestoreDBClusterFromSnapshot" // RestoreDBClusterFromSnapshotRequest generates a "aws/request.Request" representing the @@ -5775,8 +5826,7 @@ type CreateDBClusterInput struct { DBClusterIdentifier *string `type:"string" required:"true"` // The name of the DB cluster parameter group to associate with this DB cluster. - // If this argument is omitted, default.aurora5.6 for the specified engine will - // be used. + // If this argument is omitted, default.aurora5.6 will be used. // // Constraints: // @@ -5831,7 +5881,7 @@ type CreateDBClusterInput struct { // Constraints: Must contain from 8 to 41 characters. 
MasterUserPassword *string `type:"string"` - // The name of the master user for the client DB cluster. + // The name of the master user for the DB cluster. // // Constraints: // @@ -7167,8 +7217,6 @@ type CreateDBSecurityGroupInput struct { // // Must not be "Default" // - // Cannot contain spaces - // // Example: mysecuritygroup DBSecurityGroupName *string `type:"string" required:"true"` @@ -8966,8 +9014,6 @@ type DeleteDBSecurityGroupInput struct { // Cannot end with a hyphen or contain two consecutive hyphens // // Must not be "Default" - // - // Cannot contain spaces DBSecurityGroupName *string `type:"string" required:"true"` } @@ -10493,9 +10539,10 @@ type DescribeDBSnapshotsInput struct { // public - Return all DB snapshots that have been marked as public. // // If you don't specify a SnapshotType value, then both automated and manual - // snapshots are returned. You can include shared snapshots with these results - // by setting the IncludeShared parameter to true. You can include public snapshots - // with these results by setting the IncludePublic parameter to true. + // snapshots are returned. Shared and public DB snapshots are not included in + // the returned results by default. You can include shared snapshots with these + // results by setting the IncludeShared parameter to true. You can include public + // snapshots with these results by setting the IncludePublic parameter to true. // // The IncludeShared and IncludePublic parameters don't apply for SnapshotType // values of manual or automated. The IncludePublic parameter doesn't apply @@ -12659,6 +12706,22 @@ type ModifyDBInstanceInput struct { // Cannot end with a hyphen or contain two consecutive hyphens DBSecurityGroups []*string `locationNameList:"DBSecurityGroupName" type:"list"` + // The new DB subnet group for the DB instance. You can use this parameter to + // move your DB instance to a different VPC, or to a different subnet group + // in the same VPC. If your DB instance is not in a VPC, you can also use this + // parameter to move your DB instance into a VPC. For more information, see + // Updating the VPC for a DB Instance (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.WorkingWithRDSInstanceinaVPC.html#USER_VPC.Non-VPC2VPC). + // + // Changing the subnet group causes an outage during the change. The change + // is applied during the next maintenance window, unless you specify true for + // the ApplyImmediately parameter. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. + // + // Example: mySubnetGroup + DBSubnetGroupName *string `type:"string"` + // Specify the Active Directory Domain to move the instance to. // // The specified Active Directory Domain must be created prior to this operation. @@ -12717,6 +12780,11 @@ type ModifyDBInstanceInput struct { // snapshot of the instance. Iops *int64 `type:"integer"` + // The license model for the DB instance. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + // The new password for the DB instance master user. Can be any printable ASCII // character except "/", """, or "@". // @@ -13293,6 +13361,9 @@ type Option struct { // The option settings for this option. OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + // The version of the option. + OptionVersion *string `type:"string"` + // Indicate if this option is permanent. 
Permanent *bool `type:"boolean"` @@ -13330,6 +13401,9 @@ type OptionConfiguration struct { // The option settings to include in an option group. OptionSettings []*OptionSetting `locationNameList:"OptionSetting" type:"list"` + // The version for the option. + OptionVersion *string `type:"string"` + // The optional port for the option. Port *int64 `type:"integer"` @@ -13450,6 +13524,9 @@ type OptionGroupOption struct { // for each option in an option group. OptionGroupOptionSettings []*OptionGroupOptionSetting `locationNameList:"OptionGroupOptionSetting" type:"list"` + // Specifies the versions that are available for the option. + OptionGroupOptionVersions []*OptionVersion `locationNameList:"OptionVersion" type:"list"` + // List of all options that are prerequisites for this option. OptionsDependedOn []*string `locationNameList:"OptionName" type:"list"` @@ -13560,6 +13637,28 @@ func (s OptionSetting) GoString() string { return s.String() } +// The version for an option. Option group option versions are returned by the +// DescribeOptionGroupOptions action. +type OptionVersion struct { + _ struct{} `type:"structure"` + + // True if the version is the default version of the option; otherwise, false. + IsDefault *bool `type:"boolean"` + + // The version of the option. + Version *string `type:"string"` +} + +// String returns the string representation +func (s OptionVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s OptionVersion) GoString() string { + return s.String() +} + // Contains a list of available options for a DB instance // // This data type is used as a response element in the DescribeOrderableDBInstanceOptions @@ -13731,6 +13830,9 @@ type PendingModifiedValues struct { // or is in progress. DBInstanceIdentifier *string `type:"string"` + // The new DB subnet group for the DB instance. + DBSubnetGroupName *string `type:"string"` + // Indicates the database engine version. EngineVersion *string `type:"string"` @@ -13738,6 +13840,11 @@ type PendingModifiedValues struct { // applied or is being applied. Iops *int64 `type:"integer"` + // The license model for the DB instance. + // + // Valid values: license-included | bring-your-own-license | general-public-license + LicenseModel *string `type:"string"` + // Contains the pending or in-progress change of the master credentials for // the DB instance. MasterUserPassword *string `type:"string"` @@ -14432,6 +14539,267 @@ func (s ResourcePendingMaintenanceActions) GoString() string { return s.String() } +type RestoreDBClusterFromS3Input struct { + _ struct{} `type:"structure"` + + // A list of EC2 Availability Zones that instances in the restored DB cluster + // can be created in. + AvailabilityZones []*string `locationNameList:"AvailabilityZone" type:"list"` + + // The number of days for which automated backups of the restored DB cluster + // are retained. You must specify a minimum value of 1. + // + // Default: 1 + // + // Constraints: + // + // Must be a value from 1 to 35 + BackupRetentionPeriod *int64 `type:"integer"` + + // A value that indicates that the restored DB cluster should be associated + // with the specified CharacterSet. + CharacterSetName *string `type:"string"` + + // The name of the DB cluster to create from the source data in the S3 bucket. + // This parameter is isn't case-sensitive. + // + // Constraints: + // + // Must contain from 1 to 63 alphanumeric characters or hyphens. + // + // First character must be a letter. 
+ // + // Cannot end with a hyphen or contain two consecutive hyphens. + // + // Example: my-cluster1 + DBClusterIdentifier *string `type:"string" required:"true"` + + // The name of the DB cluster parameter group to associate with the restored + // DB cluster. If this argument is omitted, default.aurora5.6 will be used. + // + // Constraints: + // + // Must be 1 to 255 alphanumeric characters + // + // First character must be a letter + // + // Cannot end with a hyphen or contain two consecutive hyphens + DBClusterParameterGroupName *string `type:"string"` + + // A DB subnet group to associate with the restored DB cluster. + // + // Constraints: Must contain no more than 255 alphanumeric characters, periods, + // underscores, spaces, or hyphens. Must not be default. + // + // Example: mySubnetgroup + DBSubnetGroupName *string `type:"string"` + + // The database name for the restored DB cluster. + DatabaseName *string `type:"string"` + + // The name of the database engine to be used for the restored DB cluster. + // + // Valid Values: aurora + Engine *string `type:"string" required:"true"` + + // The version number of the database engine to use. + // + // Aurora + // + // Example: 5.6.10a + EngineVersion *string `type:"string"` + + // The KMS key identifier for an encrypted DB cluster. + // + // The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption + // key. If you are creating a DB cluster with the same AWS account that owns + // the KMS encryption key used to encrypt the new DB cluster, then you can use + // the KMS key alias instead of the ARN for the KM encryption key. + // + // If the StorageEncrypted parameter is true, and you do not specify a value + // for the KmsKeyId parameter, then Amazon RDS will use your default encryption + // key. AWS KMS creates the default encryption key for your AWS account. Your + // AWS account has a different default encryption key for each AWS region. + KmsKeyId *string `type:"string"` + + // The password for the master database user. This password can contain any + // printable ASCII character except "/", """, or "@". + // + // Constraints: Must contain from 8 to 41 characters. + MasterUserPassword *string `type:"string" required:"true"` + + // The name of the master user for the restored DB cluster. + // + // Constraints: + // + // Must be 1 to 16 alphanumeric characters. + // + // First character must be a letter. + // + // Cannot be a reserved word for the chosen database engine. + MasterUsername *string `type:"string" required:"true"` + + // A value that indicates that the restored DB cluster should be associated + // with the specified option group. + // + // Permanent options cannot be removed from an option group. An option group + // cannot be removed from a DB cluster once it is associated with a DB cluster. + OptionGroupName *string `type:"string"` + + // The port number on which the instances in the restored DB cluster accept + // connections. + // + // Default: 3306 + Port *int64 `type:"integer"` + + // The daily time range during which automated backups are created if automated + // backups are enabled using the BackupRetentionPeriod parameter. + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region. To see the time blocks available, see Adjusting the Preferred + // Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. 
+ // + // Constraints: + // + // Must be in the format hh24:mi-hh24:mi. + // + // Times should be in Universal Coordinated Time (UTC). + // + // Must not conflict with the preferred maintenance window. + // + // Must be at least 30 minutes. + PreferredBackupWindow *string `type:"string"` + + // The weekly time range during which system maintenance can occur, in Universal + // Coordinated Time (UTC). + // + // Format: ddd:hh24:mi-ddd:hh24:mi + // + // Default: A 30-minute window selected at random from an 8-hour block of time + // per region, occurring on a random day of the week. To see the time blocks + // available, see Adjusting the Preferred Maintenance Window (http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/AdjustingTheMaintenanceWindow.html) + // in the Amazon RDS User Guide. + // + // Valid Days: Mon, Tue, Wed, Thu, Fri, Sat, Sun + // + // Constraints: Minimum 30-minute window. + PreferredMaintenanceWindow *string `type:"string"` + + // The name of the Amazon S3 bucket that contains the data used to create the + // Amazon Aurora DB cluster. + S3BucketName *string `type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the AWS Identity and Access Management + // (IAM) role that authorizes Amazon RDS to access the Amazon S3 bucket on your + // behalf. + S3IngestionRoleArn *string `type:"string" required:"true"` + + // The prefix for all of the file names that contain the data used to create + // the Amazon Aurora DB cluster. If you do not specify a SourceS3Prefix value, + // then the Amazon Aurora DB cluster is created by using all of the files in + // the Amazon S3 bucket. + S3Prefix *string `type:"string"` + + // The identifier for the database engine that was backed up to create the files + // stored in the Amazon S3 bucket. + // + // Valid values: mysql + SourceEngine *string `type:"string" required:"true"` + + // The version of the database that the backup files were created from. + // + // MySQL version 5.5 and 5.6 are supported. + // + // Example: 5.6.22 + SourceEngineVersion *string `type:"string" required:"true"` + + // Specifies whether the restored DB cluster is encrypted. + StorageEncrypted *bool `type:"boolean"` + + // A list of tags. + Tags []*Tag `locationNameList:"Tag" type:"list"` + + // A list of EC2 VPC security groups to associate with the restored DB cluster. + VpcSecurityGroupIds []*string `locationNameList:"VpcSecurityGroupId" type:"list"` +} + +// String returns the string representation +func (s RestoreDBClusterFromS3Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromS3Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RestoreDBClusterFromS3Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RestoreDBClusterFromS3Input"} + if s.DBClusterIdentifier == nil { + invalidParams.Add(request.NewErrParamRequired("DBClusterIdentifier")) + } + if s.Engine == nil { + invalidParams.Add(request.NewErrParamRequired("Engine")) + } + if s.MasterUserPassword == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUserPassword")) + } + if s.MasterUsername == nil { + invalidParams.Add(request.NewErrParamRequired("MasterUsername")) + } + if s.S3BucketName == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketName")) + } + if s.S3IngestionRoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("S3IngestionRoleArn")) + } + if s.SourceEngine == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEngine")) + } + if s.SourceEngineVersion == nil { + invalidParams.Add(request.NewErrParamRequired("SourceEngineVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type RestoreDBClusterFromS3Output struct { + _ struct{} `type:"structure"` + + // Contains the result of a successful invocation of the following actions: + // + // CreateDBCluster + // + // DeleteDBCluster + // + // FailoverDBCluster + // + // ModifyDBCluster + // + // RestoreDBClusterFromSnapshot + // + // RestoreDBClusterToPointInTime + // + // This data type is used as a response element in the DescribeDBClusters + // action. + DBCluster *DBCluster `type:"structure"` +} + +// String returns the string representation +func (s RestoreDBClusterFromS3Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RestoreDBClusterFromS3Output) GoString() string { + return s.String() +} + type RestoreDBClusterFromSnapshotInput struct { _ struct{} `type:"structure"` @@ -14785,7 +15153,7 @@ type RestoreDBInstanceFromDBSnapshotInput struct { // The database name for the restored DB instance. // - // This parameter doesn't apply to the MySQL or MariaDB engines. + // This parameter doesn't apply to the MySQL, PostgreSQL, or MariaDB engines. DBName *string `type:"string"` // The identifier for the DB snapshot to restore from. 
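The RestoreDBClusterFromS3 operation vendored above, together with its Validate method, fixes the minimal set of required fields. A hedged sketch of driving it with this SDK revision; the bucket name, identifiers, credentials, and role ARN below are placeholders, while the engine values and example versions come from the doc comments in the hunk:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/rds"
)

func main() {
	// Region is a placeholder; credentials come from the default provider chain.
	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
	svc := rds.New(sess)

	// Every field set here is one that Validate() flags as required.
	input := &rds.RestoreDBClusterFromS3Input{
		DBClusterIdentifier: aws.String("my-cluster1"),       // placeholder identifier
		Engine:              aws.String("aurora"),            // only valid value per the doc comment
		MasterUsername:      aws.String("admin"),             // placeholder
		MasterUserPassword:  aws.String("change-me-please1"), // placeholder, 8-41 characters
		S3BucketName:        aws.String("my-xtrabackup-bucket"),
		S3IngestionRoleArn:  aws.String("arn:aws:iam::123456789012:role/rds-s3-access"),
		SourceEngine:        aws.String("mysql"),
		SourceEngineVersion: aws.String("5.6.22"),
	}

	out, err := svc.RestoreDBClusterFromS3(input)
	if err != nil {
		log.Fatalf("RestoreDBClusterFromS3 failed: %v", err)
	}
	fmt.Println("restoring cluster:", aws.StringValue(out.DBCluster.DBClusterIdentifier))
}

On success the output carries the DBCluster element described above, so callers can poll DescribeDBClusters on that identifier while the Percona XtraBackup data is ingested from S3.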
diff --git a/vendor/vendor.json b/vendor/vendor.json index ee31a419f..0adbef35d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -283,598 +283,413 @@ "revision": "4239b77079c7b5d1243b7b4736304ce8ddb6f0f2" }, { - "checksumSHA1": "NuOPMyBrQF/R5cXmLo5zI2kIs7M=", - "comment": "v1.1.23", + "checksumSHA1": "QhFYdDb2z6DMbZPsDi9oCQS9nRY=", + "path": "github.com/aws/aws-sdk-go", + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z", + "version": "v1.3.1" + }, + { + "checksumSHA1": "4e7X+SkJ2EfR4pNJtMlTVwIh90g=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "EiauD48zRlXIFvAENgZ+PXSEnT0=", - "comment": "v1.1.23", + "checksumSHA1": "dNZNaOPfBPnzE2CBnfhXXZ9g9jU=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + 
"revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "svFeyM3oQkk0nfQ0pguDjMgV2M4=", - "comment": "v1.1.23", + "checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=", + "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" + }, + { + "checksumSHA1": "dVqXFA18tta86y9KIfBqejJRI8Q=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "46SVikiXo5xuy/CS6mM1XVTUU7w=", - "comment": "v1.1.23", + "checksumSHA1": "6rpx6vnvZFvQTIKtxCmhKctnTBU=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "0HzXzMByDLiJSqrMEqbg5URAx0o=", + "checksumSHA1": "7lla+sckQeF18wORAGuU2fFMlp4=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "uNmSKXAF8B9HWEciW+iyUwZ99qQ=", - "comment": "v1.1.23", "path": 
"github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "L7xWYwx0jNQnzlYHwBS+1q6DcCI=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "H9TymcQkQnXSXSVfjggiiS4bpzM=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "isoix7lTx4qIq2zI2xFADtti5SI=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "oUOTWZIpPJiGjc9p/hntdBDvS10=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "Y6Db2GGfGD9LPpcJIPj8vXE8BbQ=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - 
"revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" - }, - { - "comment": "v1.1.23", - "path": "github.com/aws/aws-sdk-go/private/signer/v4", - "revision": "2cc71659118a868dc7544a7ef0808eb42d487011", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "j8CUd3jhZ8K+cI8fy785NmqJyzg=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "qoTWohhN8wMZvdMAbwi+B5YhQJ0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "vp/AYdsQnZtoPqtX86VsgmLIx1w=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "4deSd9La3EF2Cmq+tD5rcvhfTGQ=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "eCFTaV9GKqv/UEzwRgFFUaFz098=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "G9CmCfw00Bjz0TtJsEnxGE6mv/0=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": 
"mWNJKpt18ASs9/RhnIjILcsGlng=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "Q6xeArbCzOunYsn2tFyTA5LN1Cg=", - "comment": "v1.1.23", + "checksumSHA1": "sP/qEaDICVBV3rRw2sl759YI0iw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "p5a/DcdUvhTx0PCRR+/CRXk9g6c=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "N8Sgq+xG2vYJdKBikM3yQuIBZfs=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "BiT1NC5G4H7OeNcI7jzkZUzlpr4=", - "comment": "v1.1.23", + "checksumSHA1": "i4hrcsFXLAQXzaxvWh6+BG8XcIU=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "y+pZPK8hcTDwq1zHuRduWE14flw=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "Ao/Vq8RYiaW63HasBBPkNg/i7CM=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "IEHq+VLH1fud1oQ4MXj1nqfpgUY=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "sHPoLMWXO5tM63ipuxVXduuRypI=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": 
"1vOgFGxLhjNe6BK3RJaV1OqisCs=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "rjSScNzMTvEHv7Lk5KcxDpNU5EE=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "RZF1yHtJhAqaMwbeAM/6BdLLavk=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "VAlXnW+WxxWRcCv4xsCoox2kgE0=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "qHuJHGUAuuizD9834MP3gVupfdo=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "1c9xsISLQWKSrORIpdokCCWCe2M=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "bvVmHWxCOk0Cmw333zQ5jutPCZQ=", - "comment": "v1.1.15", + "checksumSHA1": "MA6U/Vj0D00yihMHD6bXKyjtfeE=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "TtIAgZ+evpkKB5bBYCB69k0wZoU=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "B1EtgBrv//gYqA+Sp6a/SK2zLO4=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "kXJ9ycLAIj0PFSFbfrA/LR/hIi8=", - "comment": "v1.1.23", "path": 
"github.com/aws/aws-sdk-go/service/iam", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "2n5/m0ClE4OyQRNdjfLwg+nSY3o=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "/cFX1/Gr6M+r9232gLIV+4np7Po=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "jM0EhAIybh0fyLHxrmVSmG3JLmU=", - "comment": "v1.1.23", + "checksumSHA1": "Qpi347xz5FIQISq73dZSdIf47AU=", "path": "github.com/aws/aws-sdk-go/service/lambda", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "aLwDFgrPzIBidURxso1ujcr2pDs=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { - "checksumSHA1": "w0aQAtZ42oGeVOqwwG15OBGoU1s=", - "comment": "v1.1.23", + "checksumSHA1": "9JvmBN9zOBFAIMhBUNU81ZTdFQA=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "mgImZ/bluUOY9GpQ/oAnscIXwrA=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "y6jKUvrpTJxj5uh6OqQ4FujhCHU=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "+608jtc5uRpGqGu5ntpKhfWgwGc=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "o+bjuT6ycywUf+vXY9hYK4Z3okE=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": 
"3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "+ic7vevBfganFLENR29pJaEf4Tw=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "oLAlquYlQzgYFS9ochS/iQ9+uXY=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "checksumSHA1": "nH/itbdeFHpl4ysegdtgww9bFSA=", - "comment": "v1.1.23", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "3b8c171554fc7d4fc53b87e25d4926a9e7495c2e", - "revisionTime": "2016-07-29T00:51:21Z", - "version": "v1.2.10", - "versionExact": "v1.2.10" + "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", + "revisionTime": "2016-08-06T21:45:34Z" }, { "path": "github.com/bgentry/speakeasy", From 6ddb8f5975200d4572f5d77820526b82a11a7681 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Mon, 8 Aug 2016 17:19:06 -0500 Subject: [PATCH 0599/1238] provider/aws: session.New -> session.NewSession() Version 1.3.1 deprecates use of `session.New()` in favour of `session.NewSession()`, which also returns an error. This commit updates the various call sites previously making use of `session.New()`. --- builtin/providers/aws/auth_helpers.go | 14 ++++++++++++-- builtin/providers/aws/auth_helpers_test.go | 5 ++++- builtin/providers/aws/config.go | 18 ++++++++++-------- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/builtin/providers/aws/auth_helpers.go b/builtin/providers/aws/auth_helpers.go index 552a4234f..1d1bb246b 100644 --- a/builtin/providers/aws/auth_helpers.go +++ b/builtin/providers/aws/auth_helpers.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/iam" "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" ) @@ -25,7 +26,12 @@ func GetAccountId(iamconn *iam.IAM, stsconn *sts.STS, authProviderName string) ( cfg := &aws.Config{} setOptionalEndpoint(cfg) - metadataClient := ec2metadata.New(session.New(cfg)) + sess, err := session.NewSession(cfg) + if err != nil { + return "", errwrap.Wrapf("Error creating AWS session: %s", err) + } + + metadataClient := ec2metadata.New(sess) info, err := metadataClient.IAMInfo() if err != nil { // This can be triggered when no IAM Role is assigned @@ -114,7 +120,11 @@ func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentia // Real AWS should reply to a simple metadata request. 
// We check it actually does to ensure something else didn't just // happen to be listening on the same IP:Port - metadataClient := ec2metadata.New(session.New(cfg)) + sess, err := session.NewSession(cfg) + if err != nil { + log.Printf("[INFO] Error creating AWS session for metadata client: %s", err) + } + metadataClient := ec2metadata.New(sess) if metadataClient.Available() { providers = append(providers, &ec2rolecreds.EC2RoleProvider{ Client: metadataClient, diff --git a/builtin/providers/aws/auth_helpers_test.go b/builtin/providers/aws/auth_helpers_test.go index a9de0fcc6..4ec0cfd26 100644 --- a/builtin/providers/aws/auth_helpers_test.go +++ b/builtin/providers/aws/auth_helpers_test.go @@ -648,12 +648,15 @@ func getMockedAwsIamStsApi(endpoints []*iamEndpoint) (func(), *iam.IAM, *sts.STS sc := awsCredentials.NewStaticCredentials("accessKey", "secretKey", "") - sess := session.New(&aws.Config{ + sess, err := session.NewSession(&aws.Config{ Credentials: sc, Region: aws.String("us-east-1"), Endpoint: aws.String(ts.URL), CredentialsChainVerboseErrors: aws.Bool(true), }) + if err != nil { + panic(fmt.Sprintf("Error creating AWS Session: %s", err)) + } iamConn := iam.New(sess) stsConn := sts.New(sess) return ts.Close, iamConn, stsConn diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 434bdffdd..5a50750a2 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -1,18 +1,12 @@ package aws import ( + "crypto/tls" "fmt" "log" "net/http" "strings" - "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform/helper/logging" - "github.com/hashicorp/terraform/terraform" - - "crypto/tls" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/request" @@ -56,6 +50,11 @@ import ( "github.com/aws/aws-sdk-go/service/sns" "github.com/aws/aws-sdk-go/service/sqs" "github.com/aws/aws-sdk-go/service/sts" + "github.com/hashicorp/errwrap" + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/terraform/helper/logging" + "github.com/hashicorp/terraform/terraform" ) type Config struct { @@ -178,7 +177,10 @@ func (c *Config) Client() (interface{}, error) { } // Set up base session - sess := session.New(awsConfig) + sess, err := session.NewSession(awsConfig) + if err != nil { + return nil, errwrap.Wrapf("Error creating AWS session: %s", err) + } sess.Handlers.Build.PushFrontNamed(addTerraformVersionToUserAgent) // Some services exist only in us-east-1, e.g. 
because they manage From c39bad01a197368bd1637aaf48f06acc59136ee7 Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 9 Aug 2016 15:11:49 +1200 Subject: [PATCH 0600/1238] provider/aws: Changing the region that * tests run against --- ...resource_aws_load_balancer_backend_server_policy_test.go | 6 +++--- .../aws/resource_aws_load_balancer_listener_policy_test.go | 6 +++--- .../providers/aws/resource_aws_load_balancer_policy_test.go | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go index bf783bb00..b52be936d 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_backend_server_policy_test.go @@ -167,7 +167,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 443 @@ -264,7 +264,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 443 @@ -371,7 +371,7 @@ resource "aws_iam_server_certificate" "test-iam-cert0" { resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 443 diff --git a/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go index 9f3c02105..b8f9816a8 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_listener_policy_test.go @@ -145,7 +145,7 @@ func testAccCheckAWSLoadBalancerListenerPolicyState(loadBalancerName string, loa const testAccAWSLoadBalancerListenerPolicyConfig_basic0 = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 80 @@ -181,7 +181,7 @@ resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { const testAccAWSLoadBalancerListenerPolicyConfig_basic1 = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 80 @@ -217,7 +217,7 @@ resource "aws_load_balancer_listener_policy" "test-lb-listener-policies-80" { const testAccAWSLoadBalancerListenerPolicyConfig_basic2 = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 80 diff --git a/builtin/providers/aws/resource_aws_load_balancer_policy_test.go b/builtin/providers/aws/resource_aws_load_balancer_policy_test.go index 29771f789..9fe60cd96 100644 --- a/builtin/providers/aws/resource_aws_load_balancer_policy_test.go +++ b/builtin/providers/aws/resource_aws_load_balancer_policy_test.go @@ -142,7 +142,7 @@ func testAccCheckAWSLoadBalancerPolicyState(elbResource string, policyResource s const testAccAWSLoadBalancerPolicyConfig_basic = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { 
instance_port = 80 @@ -170,7 +170,7 @@ resource "aws_load_balancer_policy" "test-policy" { const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned0 = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 80 @@ -206,7 +206,7 @@ resource "aws_load_balancer_listener_policy" "test-lb-test-policy-80" { const testAccAWSLoadBalancerPolicyConfig_updateWhileAssigned1 = ` resource "aws_elb" "test-lb" { name = "test-aws-policies-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 80 From 1e6e8bfc3d4512b5cd8bf6abe82dadf993524309 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 9 Aug 2016 15:19:16 +1200 Subject: [PATCH 0601/1238] Update CHANGELOG.md --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 49ae9df19..32cef3c10 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,6 +3,9 @@ FEATURES: * **New Provider:** `archive` [GH-7322] * **New Resource:** `aws_vpn_gateway_attachment` [GH-7870] + * **New Resource:** `aws_load_balancer_policy` [GH-7458] + * **New Resource:** `aws_load_balancer_backend_server_policy` [GH-7458] + * **New Resource:** `aws_load_balancer_listener_policy` [GH-7458] IMPROVEMENTS * provider/aws: Introduce `aws_elasticsearch_domain` `elasticsearch_version` field (to specify ES version) [GH-7860] From de747fae5958c7e970270068965cb21618e04a1c Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Tue, 9 Aug 2016 15:20:53 +1200 Subject: [PATCH 0602/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 32cef3c10..45d6c45dc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ IMPROVEMENTS * provider/aws: Change the way ARNs are built [GH-7151] * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` [GH-7891] + * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` [GH-8038] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From ec310754cda7593b86ed76e2c3f2b9d990132cba Mon Sep 17 00:00:00 2001 From: stack72 Date: Tue, 9 Aug 2016 15:43:02 +1200 Subject: [PATCH 0603/1238] provider/aws: Add the documentation for the new * resources to the ERB layout --- .../aws/resource_aws_rds_cluster_instance_test.go | 2 +- ...ad_balancer_backend_server_policy.html.markdown | 2 +- website/source/layouts/aws.erb | 14 +++++++++++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index 212fab1e5..12663a702 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -187,7 +187,7 @@ func testAccCheckAWSClusterInstanceExists(n string, v *rds.DBInstance) resource. 
} } -func TestAccAWSCluster_withInstanceEnhancedMonitor(t *testing.T) { +func TestAccAWSRDSClusterInstance_withInstanceEnhancedMonitor(t *testing.T) { var v rds.DBInstance resource.Test(t, resource.TestCase{ diff --git a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown index b135bda26..7b6177ac2 100644 --- a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown +++ b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown @@ -60,7 +60,7 @@ resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policie } ``` -Where the file `pubkey` in the current directoy contains only the _public key_ of the certificate. +Where the file `pubkey` in the current directory contains only the _public key_ of the certificate. ``` cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 4eec00ae6..fe4f938fd 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -193,7 +193,7 @@ - > + > EC2 Resources From f3005f098470ddfdd053a5d0778eac1a9b202af0 Mon Sep 17 00:00:00 2001 From: Abhishek L Date: Tue, 9 Aug 2016 18:01:11 +0200 Subject: [PATCH 0616/1238] doc: openstack fix minor typo s/ommitted/omitted --- website/source/docs/providers/openstack/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown index c9e23a765..c02bce24c 100644 --- a/website/source/docs/providers/openstack/index.html.markdown +++ b/website/source/docs/providers/openstack/index.html.markdown @@ -51,7 +51,7 @@ The following arguments are supported: Keystone service. By specifying a token, you do not have to specify a username/password combination, since the token was already created by a username/password out of band of Terraform. - If ommitted, the `OS_AUTH_TOKEN` environment variable is used. + If omitted, the `OS_AUTH_TOKEN` environment variable is used. * `api_key` - (Optional; Required if not using `password`) An API Key is issued by a cloud provider as alternative password. Unless From 34d425fc267d1ec06ac0acf4f4a318a3b7974bc3 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Tue, 9 Aug 2016 16:49:46 +0000 Subject: [PATCH 0617/1238] Missing OS_EXTGW_ID in OpenStack Docs This commit adds a note about the requirement for the OS_EXTGW_ID environment variable in order to run the OpenStack Provider acceptance tests. --- website/source/docs/providers/openstack/index.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown index c02bce24c..867b02b1b 100644 --- a/website/source/docs/providers/openstack/index.html.markdown +++ b/website/source/docs/providers/openstack/index.html.markdown @@ -144,6 +144,8 @@ variables must also be set: * `OS_NETWORK_ID` - The UUID of a network in your test environment. +* `OS_EXTGW_ID` - The UUID of the external gateway. 
+ To make development easier, the `builtin/providers/openstack/devstack/deploy.sh` script will assist in installing and configuring a standardized [DevStack](http://docs.openstack.org/developer/devstack/) environment along with From e181d753fee18f0ff04a70c10d78a87d328dea78 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 9 Aug 2016 15:03:07 -0400 Subject: [PATCH 0618/1238] build: Fix ordering of plugin list --- command/internal_plugin_list.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index e4807b799..e3fc743c3 100644 --- a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -61,6 +61,7 @@ import ( ) var InternalProviders = map[string]plugin.ProviderFunc{ + "archive": archiveprovider.Provider, "atlas": atlasprovider.Provider, "aws": awsprovider.Provider, "azure": azureprovider.Provider, @@ -105,7 +106,6 @@ var InternalProviders = map[string]plugin.ProviderFunc{ "ultradns": ultradnsprovider.Provider, "vcd": vcdprovider.Provider, "vsphere": vsphereprovider.Provider, - "archive": archiveprovider.Provider, } var InternalProvisioners = map[string]plugin.ProvisionerFunc{ From 22684200b534c1e88c713903a9853426c494dc0f Mon Sep 17 00:00:00 2001 From: Clint Date: Tue, 9 Aug 2016 14:19:12 -0500 Subject: [PATCH 0619/1238] provider/aws: Fix line ending errors/diffs with IAM Server Certs (#8074) * provider/aws: Failing IAM Server Cert test with windows line endings * provider/aws: Fix IAM Server Cert issue with line encodings --- .../resource_aws_iam_server_certificate.go | 24 +++++-- ...esource_aws_iam_server_certificate_test.go | 70 +++++++++++++++++++ .../iam-ssl-unix-line-endings.pem | 19 +++++ .../iam-ssl-windows-line-endings.pem | 19 +++++ 4 files changed, 128 insertions(+), 4 deletions(-) create mode 100644 builtin/providers/aws/test-fixtures/iam-ssl-unix-line-endings.pem create mode 100644 builtin/providers/aws/test-fixtures/iam-ssl-windows-line-endings.pem diff --git a/builtin/providers/aws/resource_aws_iam_server_certificate.go b/builtin/providers/aws/resource_aws_iam_server_certificate.go index a3f170c17..28258ef15 100644 --- a/builtin/providers/aws/resource_aws_iam_server_certificate.go +++ b/builtin/providers/aws/resource_aws_iam_server_certificate.go @@ -201,14 +201,30 @@ func normalizeCert(cert interface{}) string { return "" } + var rawCert string switch cert.(type) { case string: - hash := sha1.Sum([]byte(strings.TrimSpace(cert.(string)))) - return hex.EncodeToString(hash[:]) + rawCert = cert.(string) case *string: - hash := sha1.Sum([]byte(strings.TrimSpace(*cert.(*string)))) - return hex.EncodeToString(hash[:]) + rawCert = *cert.(*string) default: return "" } + + cleanVal := sha1.Sum(stripCR([]byte(strings.TrimSpace(rawCert)))) + return hex.EncodeToString(cleanVal[:]) +} + +// strip CRs from raw literals. 
Lifted from go/scanner/scanner.go +// See https://github.com/golang/go/blob/release-branch.go1.6/src/go/scanner/scanner.go#L479 +func stripCR(b []byte) []byte { + c := make([]byte, len(b)) + i := 0 + for _, ch := range b { + if ch != '\r' { + c[i] = ch + i++ + } + } + return c[:i] } diff --git a/builtin/providers/aws/resource_aws_iam_server_certificate_test.go b/builtin/providers/aws/resource_aws_iam_server_certificate_test.go index fe375e8c0..a6fccb988 100644 --- a/builtin/providers/aws/resource_aws_iam_server_certificate_test.go +++ b/builtin/providers/aws/resource_aws_iam_server_certificate_test.go @@ -9,6 +9,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" ) @@ -86,6 +87,35 @@ func TestAccAWSIAMServerCertificate_disappears(t *testing.T) { }) } +func TestAccAWSIAMServerCertificate_file(t *testing.T) { + var cert iam.ServerCertificate + + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckIAMServerCertificateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccIAMServerCertConfig_file(rInt, "iam-ssl-unix-line-endings"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), + testAccCheckAWSServerCertAttributes(&cert), + ), + }, + + resource.TestStep{ + Config: testAccIAMServerCertConfig_file(rInt, "iam-ssl-windows-line-endings"), + Check: resource.ComposeTestCheckFunc( + testAccCheckCertExists("aws_iam_server_certificate.test_cert", &cert), + testAccCheckAWSServerCertAttributes(&cert), + ), + }, + }, + }) +} + func testAccCheckCertExists(n string, cert *iam.ServerCertificate) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -285,3 +315,43 @@ dg+Sd4Wjm89UQoUUoiIcstY7FPbqfBtYKfh4RYHAHV2BwDFqzZCM EOF } ` + +// iam-ssl-unix-line-endings +func testAccIAMServerCertConfig_file(rInt int, fName string) string { + return fmt.Sprintf(` +resource "aws_iam_server_certificate" "test_cert" { + name = "terraform-test-cert-%d" + certificate_body = "${file("test-fixtures/%s.pem")}" + + private_key = < Date: Tue, 9 Aug 2016 14:19:58 -0500 Subject: [PATCH 0620/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e21dd2dbe..50fed8f54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ BUG FIXES: * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings [GH-7777] * providers/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes [GH-8052] + * provider/aws: Fix line ending errors/diffs with IAM Server Certs [GH-8074] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From 9fa978a45fe86891d1fb7cae05d9773b051e9f57 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Tue, 9 Aug 2016 16:03:05 -0400 Subject: [PATCH 0621/1238] docs: Fix map key interpolation documentation Previously was recommending the now-invalid dot syntax for map keys, change to using HIL indexing. 
--- website/source/docs/configuration/interpolation.html.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/website/source/docs/configuration/interpolation.html.md b/website/source/docs/configuration/interpolation.html.md index 22b8c04d1..18374f5cc 100644 --- a/website/source/docs/configuration/interpolation.html.md +++ b/website/source/docs/configuration/interpolation.html.md @@ -27,9 +27,8 @@ will be rendered as a literal `${foo}`. variable name. For example, `${var.foo}` will interpolate the `foo` variable value. If the variable is a map, then you can reference static keys in the map with the syntax -`var.MAP.KEY`. For example, `${var.amis.us-east-1}` would -get the value of the `us-east-1` key within the `amis` variable -that is a map. +`var.MAP["KEY"]`. For example, `${var.amis["us-east-1"]` would +get the value of the `us-east-1` key within the `amis` map variable. **To reference attributes of your own resource**, the syntax is `self.ATTRIBUTE`. For example `${self.private_ip_address}` will From a6de08837563b235389d9b8ad7a9dd092eeab195 Mon Sep 17 00:00:00 2001 From: "Mosley, Franklin" Date: Mon, 14 Mar 2016 23:13:44 -0500 Subject: [PATCH 0622/1238] Add new resource aws_lb_ssl_negotiation_policy Added new resource to create an AWS ELB SSL negotiation policy, and an acceptance test. --- builtin/providers/aws/provider.go | 1 + .../resource_aws_lb_ssl_negotiation_policy.go | 181 ++++++++++++ ...urce_aws_lb_ssl_negotiation_policy_test.go | 272 ++++++++++++++++++ builtin/providers/aws/structure.go | 38 +++ 4 files changed, 492 insertions(+) create mode 100644 builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go create mode 100644 builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index dab42ba87..c16e8cf9d 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -223,6 +223,7 @@ func Provider() terraform.ResourceProvider { "aws_load_balancer_policy": resourceAwsLoadBalancerPolicy(), "aws_load_balancer_backend_server_policy": resourceAwsLoadBalancerBackendServerPolicies(), "aws_load_balancer_listener_policy": resourceAwsLoadBalancerListenerPolicies(), + "aws_lb_ssl_negotiation_policy": resourceAwsLBSSLNegotiationPolicy(), "aws_main_route_table_association": resourceAwsMainRouteTableAssociation(), "aws_nat_gateway": resourceAwsNatGateway(), "aws_network_acl": resourceAwsNetworkAcl(), diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go new file mode 100644 index 000000000..cee4a4d44 --- /dev/null +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go @@ -0,0 +1,181 @@ +package aws + +import ( + "bytes" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceAwsLBSSLNegotiationPolicy() *schema.Resource { + return &schema.Resource{ + // There is no concept of "updating" an LB policy in + // the AWS API. 
+ Create: resourceAwsLBSSLNegotiationPolicyCreate, + Read: resourceAwsLBSSLNegotiationPolicyRead, + Delete: resourceAwsLBSSLNegotiationPolicyDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "load_balancer": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "attribute": &schema.Schema{ + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "value": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + Set: func(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["name"].(string))) + return hashcode.String(buf.String()) + }, + }, + }, + } +} + +func resourceAwsLBSSLNegotiationPolicyCreate(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + // Provision the SSLNegotiationPolicy + lbspOpts := &elb.CreateLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + PolicyName: aws.String(d.Get("name").(string)), + PolicyTypeName: aws.String("SSLNegotiationPolicyType"), + } + + // Check for Policy Attributes + if v, ok := d.GetOk("attribute"); ok { + // Expand the "attribute" set to aws-sdk-go compat []*elb.PolicyAttribute + lbspOpts.PolicyAttributes = expandPolicyAttributes(v.(*schema.Set).List()) + } + + log.Printf("[DEBUG] Load Balancer Policy opts: %#v", lbspOpts) + if _, err := elbconn.CreateLoadBalancerPolicy(lbspOpts); err != nil { + return fmt.Errorf("Error creating Load Balancer Policy: %s", err) + } + + setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), + PolicyNames: []*string{aws.String(d.Get("name").(string))}, + } + + log.Printf("[DEBUG] SSL Negotiation create configuration: %#v", setLoadBalancerOpts) + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { + return fmt.Errorf("Error setting SSLNegotiationPolicy: %s", err) + } + + d.SetId(fmt.Sprintf("%s:%d:%s", + *lbspOpts.LoadBalancerName, + *setLoadBalancerOpts.LoadBalancerPort, + *lbspOpts.PolicyName)) + return nil +} + +func resourceAwsLBSSLNegotiationPolicyRead(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + lbName, lbPort, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id()) + + request := &elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(lbName), + PolicyNames: []*string{aws.String(policyName)}, + } + + getResp, err := elbconn.DescribeLoadBalancerPolicies(request) + if err != nil { + if ec2err, ok := err.(awserr.Error); ok && ec2err.Code() == "PolicyNotFound" { + // The policy is gone. 
+ d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving policy: %s", err) + } + + if len(getResp.PolicyDescriptions) != 1 { + return fmt.Errorf("Unable to find policy %#v", getResp.PolicyDescriptions) + } + + // We can get away with this because there's only one policy returned + policyDesc := getResp.PolicyDescriptions[0] + attributes := flattenPolicyAttributes(policyDesc.PolicyAttributeDescriptions) + d.Set("attributes", attributes) + + d.Set("name", policyName) + d.Set("load_balancer", lbName) + d.Set("lb_port", lbPort) + + return nil +} + +func resourceAwsLBSSLNegotiationPolicyDelete(d *schema.ResourceData, meta interface{}) error { + elbconn := meta.(*AWSClient).elbconn + + lbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(d.Id()) + + // Perversely, if we Set an empty list of PolicyNames, we detach the + // policies attached to a listener, which is required to delete the + // policy itself. + setLoadBalancerOpts := &elb.SetLoadBalancerPoliciesOfListenerInput{ + LoadBalancerName: aws.String(d.Get("load_balancer").(string)), + LoadBalancerPort: aws.Int64(int64(d.Get("lb_port").(int))), + PolicyNames: []*string{}, + } + + if _, err := elbconn.SetLoadBalancerPoliciesOfListener(setLoadBalancerOpts); err != nil { + return fmt.Errorf("Error removing SSLNegotiationPolicy: %s", err) + } + + request := &elb.DeleteLoadBalancerPolicyInput{ + LoadBalancerName: aws.String(lbName), + PolicyName: aws.String(policyName), + } + + if _, err := elbconn.DeleteLoadBalancerPolicy(request); err != nil { + return fmt.Errorf("Error deleting SSL negotiation policy %s: %s", d.Id(), err) + } + return nil +} + +// resourceAwsLBSSLNegotiationPolicyParseId takes an ID and parses it into +// it's constituent parts. You need three axes (LB name, policy name, and LB +// port) to create or identify an SSL negotiation policy in AWS's API. 
+func resourceAwsLBSSLNegotiationPolicyParseId(id string) (string, string, string) { + parts := strings.SplitN(id, ":", 3) + return parts[0], parts[1], parts[2] +} diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go new file mode 100644 index 000000000..58d948468 --- /dev/null +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go @@ -0,0 +1,272 @@ +package aws + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/service/elb" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAWSLBSSLNegotiationPolicy_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBSSLNegotiationPolicyDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccSslNegotiationPolicyConfig( + fmt.Sprintf("tf-acctest-%s", acctest.RandString(10))), + Check: resource.ComposeTestCheckFunc( + testAccCheckLBSSLNegotiationPolicy( + "aws_elb.lb", + "aws_lb_ssl_negotiation_policy.foo", + ), + resource.TestCheckResourceAttr( + "aws_lb_ssl_negotiation_policy.foo", "attribute.#", "7"), + ), + }, + }, + }) +} + +func testAccCheckLBSSLNegotiationPolicyDestroy(s *terraform.State) error { + elbconn := testAccProvider.Meta().(*AWSClient).elbconn + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_elb" && rs.Type != "aws_lb_ssl_negotiation_policy" { + continue + } + + // Check that the ELB is destroyed + if rs.Type == "aws_elb" { + describe, err := elbconn.DescribeLoadBalancers(&elb.DescribeLoadBalancersInput{ + LoadBalancerNames: []*string{aws.String(rs.Primary.ID)}, + }) + + if err == nil { + if len(describe.LoadBalancerDescriptions) != 0 && + *describe.LoadBalancerDescriptions[0].LoadBalancerName == rs.Primary.ID { + return fmt.Errorf("ELB still exists") + } + } + + // Verify the error + providerErr, ok := err.(awserr.Error) + if !ok { + return err + } + + if providerErr.Code() != "LoadBalancerNotFound" { + return fmt.Errorf("Unexpected error: %s", err) + } + } else { + // Check that the SSL Negotiation Policy is destroyed + elbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(rs.Primary.ID) + _, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(elbName), + PolicyNames: []*string{aws.String(policyName)}, + }) + + if err == nil { + return fmt.Errorf("ELB SSL Negotiation Policy still exists") + } + } + } + + return nil +} + +func testAccCheckLBSSLNegotiationPolicy(elbResource string, policyResource string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[elbResource] + if !ok { + return fmt.Errorf("Not found: %s", elbResource) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + policy, ok := s.RootModule().Resources[policyResource] + if !ok { + return fmt.Errorf("Not found: %s", policyResource) + } + + elbconn := testAccProvider.Meta().(*AWSClient).elbconn + + elbName, _, policyName := resourceAwsLBSSLNegotiationPolicyParseId(policy.Primary.ID) + resp, err := elbconn.DescribeLoadBalancerPolicies(&elb.DescribeLoadBalancerPoliciesInput{ + LoadBalancerName: aws.String(elbName), + PolicyNames: 
[]*string{aws.String(policyName)}, + }) + + if err != nil { + fmt.Printf("[ERROR] Problem describing load balancer policy '%s': %s", policyName, err) + return err + } + + if len(resp.PolicyDescriptions) != 1 { + return fmt.Errorf("Unable to find policy %#v", resp.PolicyDescriptions) + } + + attrmap := policyAttributesToMap(&resp.PolicyDescriptions[0].PolicyAttributeDescriptions) + if attrmap["Protocol-TLSv1"] != "false" { + return fmt.Errorf("Policy attribute 'Protocol-TLSv1' was of value %s instead of false!", attrmap["Protocol-TLSv1"]) + } + if attrmap["Protocol-TLSv1.1"] != "false" { + return fmt.Errorf("Policy attribute 'Protocol-TLSv1.1' was of value %s instead of false!", attrmap["Protocol-TLSv1.1"]) + } + if attrmap["Protocol-TLSv1.2"] != "true" { + return fmt.Errorf("Policy attribute 'Protocol-TLSv1.2' was of value %s instead of true!", attrmap["Protocol-TLSv1.2"]) + } + if attrmap["Server-Defined-Cipher-Order"] != "true" { + return fmt.Errorf("Policy attribute 'Server-Defined-Cipher-Order' was of value %s instead of true!", attrmap["Server-Defined-Cipher-Order"]) + } + if attrmap["ECDHE-RSA-AES128-GCM-SHA256"] != "true" { + return fmt.Errorf("Policy attribute 'ECDHE-RSA-AES128-GCM-SHA256' was of value %s instead of true!", attrmap["ECDHE-RSA-AES128-GCM-SHA256"]) + } + if attrmap["AES128-GCM-SHA256"] != "true" { + return fmt.Errorf("Policy attribute 'AES128-GCM-SHA256' was of value %s instead of true!", attrmap["AES128-GCM-SHA256"]) + } + if attrmap["EDH-RSA-DES-CBC3-SHA"] != "false" { + return fmt.Errorf("Policy attribute 'EDH-RSA-DES-CBC3-SHA' was of value %s instead of false!", attrmap["EDH-RSA-DES-CBC3-SHA"]) + } + + return nil + } +} + +func policyAttributesToMap(attributes *[]*elb.PolicyAttributeDescription) map[string]string { + attrmap := make(map[string]string) + + for _, attrdef := range *attributes { + attrmap[*attrdef.AttributeName] = *attrdef.AttributeValue + } + + return attrmap +} + +// Sets the SSL Negotiation policy with attributes. +// The IAM Server Cert config is lifted from +// builtin/providers/aws/resource_aws_iam_server_certificate_test.go +func testAccSslNegotiationPolicyConfig(certName string) string { + return fmt.Sprintf(` +resource "aws_iam_server_certificate" "test_cert" { + name = "%s" + certificate_body = < Date: Mon, 14 Mar 2016 23:25:15 -0500 Subject: [PATCH 0623/1238] Added resource documentation. Updated the Terraform documentation to add a new page, and a link in the sidebar, for the aws_lb_ssl_negotiation_policy resource. --- .../r/lb_ssl_negotiation_policy.html.markdown | 87 +++++++++++++++++++ website/source/layouts/aws.erb | 4 + 2 files changed, 91 insertions(+) create mode 100644 website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown diff --git a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown new file mode 100644 index 000000000..f0b690ab7 --- /dev/null +++ b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown @@ -0,0 +1,87 @@ +--- +layout: "aws" +page_title: "AWS: aws_lb_ssl_negotiation_policy" +sidebar_current: "docs-aws-resource-lb-ssl-negotiation-policy" +description: |- + Provides a load balancer SSL negotiation policy, which allows an ELB to control which ciphers and protocols are supported during SSL negotiations between a client and a load balancer. 
+--- + +# aws\_lb\_ssl\_negotiation\_policy + +Provides a load balancer SSL negotiation policy, which allows an ELB to control the ciphers and protocols that are supported during SSL negotiations between a client and a load balancer. + +## Example Usage + +``` +resource "aws_elb" "lb" { + name = "test-lb" + availability_zones = ["us-east-1a"] + listener { + instance_port = 8000 + instance_protocol = "https" + lb_port = 443 + lb_protocol = "https" + ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName" + } +} + +resource "aws_lb_ssl_negotiation_policy" "foo" { + name = "foo-policy" + load_balancer = "${aws_elb.lb.id}" + lb_port = 443 + attribute { + name = "Protocol-TLSv1" + value = "false" + } + attribute { + name = "Protocol-TLSv1.1" + value = "false" + } + attribute { + name = "Protocol-TLSv1.2" + value = "true" + } + attribute { + name = "Server-Defined-Cipher-Order" + value = "true" + } + attribute { + name = "ECDHE-RSA-AES128-GCM-SHA256" + value = "true" + } + attribute { + name = "AES128-GCM-SHA256" + value = "true" + } + attribute { + name = "EDH-RSA-DES-CBC3-SHA" + value = "false" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the SSL negotiation policy. +* `load_balancer` - (Required) The load balancer to which the policy + should be attached. +* `lb_port` - (Required) The load balancer port to which the policy + should be applied. This must be an active listener on the load +balancer. +* `attribute` - (At least one Required) An SSL Negotiation policy attribute. Each has two properties: + * `name` - The name of the attribute + * `value` - The value of the attribute + +To set your attributes, please see the [AWS Elastic Load Balancer Developer Guide](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-table.html) for a listing of the supported SSL protocols, SSL options, and SSL ciphers. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The ID of the policy. +* `name` - The name of the stickiness policy. +* `load_balancer` - The load balancer to which the policy is attached. +* `lb_port` - The load balancer port to which the policy is applied. +* `attribute` - The SSL Negotiation policy attributes. diff --git a/website/source/layouts/aws.erb b/website/source/layouts/aws.erb index 6f9fbbe72..6f1f4f183 100644 --- a/website/source/layouts/aws.erb +++ b/website/source/layouts/aws.erb @@ -288,6 +288,10 @@ aws_load_balancer_policy + > + aws_lb_ssl_negotiation_policy + + > aws_placement_group From eb0cd14f416c56a8a2009c30fe47cb9c099546f0 Mon Sep 17 00:00:00 2001 From: "Mosley, Franklin" Date: Tue, 15 Mar 2016 00:27:58 -0500 Subject: [PATCH 0624/1238] Changed `attribute` argument to be optional. Changed the `attribute` argument of the resource to be optional vs. required. 
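As a minimal sketch of the intended usage after this change, a policy can now be declared with only the required arguments and no `attribute` blocks. The `aws_elb.lb` reference and the port below are placeholders, and whether ELB falls back to its default SSL negotiation settings when no attributes are supplied is decided by the AWS API rather than by this resource:

```
resource "aws_lb_ssl_negotiation_policy" "minimal" {
  # name, load_balancer and lb_port are still required by the schema
  name          = "minimal-ssl-policy"
  load_balancer = "${aws_elb.lb.id}"
  lb_port       = 443

  # No attribute blocks: the argument is now optional instead of required.
}
```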
--- .../providers/aws/resource_aws_lb_ssl_negotiation_policy.go | 2 +- .../providers/aws/r/lb_ssl_negotiation_policy.html.markdown | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go index cee4a4d44..dd8129de4 100644 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go @@ -42,7 +42,7 @@ func resourceAwsLBSSLNegotiationPolicy() *schema.Resource { "attribute": &schema.Schema{ Type: schema.TypeSet, - Required: true, + Optional: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ diff --git a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown index f0b690ab7..e45b00a7b 100644 --- a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown +++ b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown @@ -70,11 +70,11 @@ The following arguments are supported: * `lb_port` - (Required) The load balancer port to which the policy should be applied. This must be an active listener on the load balancer. -* `attribute` - (At least one Required) An SSL Negotiation policy attribute. Each has two properties: +* `attribute` - (Optional) An SSL Negotiation policy attribute. Each has two properties: * `name` - The name of the attribute * `value` - The value of the attribute -To set your attributes, please see the [AWS Elastic Load Balancer Developer Guide](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-table.html) for a listing of the supported SSL protocols, SSL options, and SSL ciphers. +To set your attributes, please see the [AWS Elastic Load Balancing Developer Guide](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-table.html) for a listing of the supported SSL protocols, SSL options, and SSL ciphers. 
## Attributes Reference From 6d7933b41a2c8376106d7615e4ed59cd37bb3d7c Mon Sep 17 00:00:00 2001 From: "Mosley, Franklin" Date: Tue, 15 Mar 2016 02:20:50 -0500 Subject: [PATCH 0625/1238] Added tests for expandPolicyAttributes/flattenPolicyAttributes --- .../resource_aws_lb_ssl_negotiation_policy.go | 6 +- builtin/providers/aws/structure.go | 4 +- builtin/providers/aws/structure_test.go | 104 ++++++++++++++++++ 3 files changed, 111 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go index dd8129de4..5d0ae7850 100644 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy.go @@ -80,8 +80,12 @@ func resourceAwsLBSSLNegotiationPolicyCreate(d *schema.ResourceData, meta interf // Check for Policy Attributes if v, ok := d.GetOk("attribute"); ok { + var err error // Expand the "attribute" set to aws-sdk-go compat []*elb.PolicyAttribute - lbspOpts.PolicyAttributes = expandPolicyAttributes(v.(*schema.Set).List()) + lbspOpts.PolicyAttributes, err = expandPolicyAttributes(v.(*schema.Set).List()) + if err != nil { + return err + } } log.Printf("[DEBUG] Load Balancer Policy opts: %#v", lbspOpts) diff --git a/builtin/providers/aws/structure.go b/builtin/providers/aws/structure.go index db4e548a0..0c9fb77ce 100644 --- a/builtin/providers/aws/structure.go +++ b/builtin/providers/aws/structure.go @@ -1450,7 +1450,7 @@ func (s setMap) MapList() []map[string]interface{} { // Takes the result of flatmap.Expand for an array of policy attributes and // returns ELB API compatible objects -func expandPolicyAttributes(configured []interface{}) []*elb.PolicyAttribute { +func expandPolicyAttributes(configured []interface{}) ([]*elb.PolicyAttribute, error) { attributes := make([]*elb.PolicyAttribute, 0, len(configured)) // Loop over our configured attributes and create @@ -1467,7 +1467,7 @@ func expandPolicyAttributes(configured []interface{}) []*elb.PolicyAttribute { } - return attributes + return attributes, nil } // Flattens an array of PolicyAttributes into a []interface{} diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index d83e458a4..f80c4bb32 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -1012,3 +1012,107 @@ func TestFlattenApiGatewayStageKeys(t *testing.T) { } } } + +func TestExpandPolicyAttributes(t *testing.T) { + expanded := []interface{}{ + map[string]interface{}{ + "name": "Protocol-TLSv1", + "value": "false", + }, + map[string]interface{}{ + "name": "Protocol-TLSv1.1", + "value": "false", + }, + map[string]interface{}{ + "name": "Protocol-TLSv1.2", + "value": "true", + }, + } + attributes, err := expandPolicyAttributes(expanded) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + if len(attributes) != 3 { + t.Fatalf("expected number of attributes to be 3, but got %s", len(attributes)) + } + + expected := &elb.PolicyAttribute{ + AttributeName: aws.String("Protocol-TLSv1.2"), + AttributeValue: aws.String("true"), + } + + if !reflect.DeepEqual(attributes[2], expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + attributes[2], + expected) + } +} + +func TestExpandPolicyAttributes_invalid(t *testing.T) { + expanded := []interface{}{ + map[string]interface{}{ + "name": "Protocol-TLSv1.2", + "value": "true", + }, + } + attributes, err := expandPolicyAttributes(expanded) + if err != nil { + 
t.Fatalf("bad: %#v", err) + } + + expected := &elb.PolicyAttribute{ + AttributeName: aws.String("Protocol-TLSv1.2"), + AttributeValue: aws.String("false"), + } + + if reflect.DeepEqual(attributes[0], expected) { + t.Fatalf( + "Got:\n\n%#v\n\nExpected:\n\n%#v\n", + attributes[0], + expected) + } +} + +func TestExpandPolicyAttributes_empty(t *testing.T) { + var expanded []interface{} + + attributes, err := expandPolicyAttributes(expanded) + if err != nil { + t.Fatalf("bad: %#v", err) + } + + if len(attributes) != 0 { + t.Fatalf("expected number of attributes to be 0, but got %s", len(attributes)) + } +} + +func TestFlattenPolicyAttributes(t *testing.T) { + cases := []struct { + Input []*elb.PolicyAttributeDescription + Output []interface{} + }{ + { + Input: []*elb.PolicyAttributeDescription{ + &elb.PolicyAttributeDescription{ + AttributeName: aws.String("Protocol-TLSv1.2"), + AttributeValue: aws.String("true"), + }, + }, + Output: []interface{}{ + map[string]string{ + "name": "Protocol-TLSv1.2", + "value": "true", + }, + }, + }, + } + + for _, tc := range cases { + output := flattenPolicyAttributes(tc.Input) + if !reflect.DeepEqual(output, tc.Output) { + t.Fatalf("Got:\n\n%#v\n\nExpected:\n\n%#v", output, tc.Output) + } + } +} From 4d55b8e9eaad1587fab4805a068658dc192ba3d4 Mon Sep 17 00:00:00 2001 From: "Mosley, Franklin" Date: Tue, 15 Mar 2016 14:49:50 -0500 Subject: [PATCH 0626/1238] Corrected printf verb of wrong type Corrected printf verb by changing it from a string type to an int type. --- builtin/providers/aws/structure_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/structure_test.go b/builtin/providers/aws/structure_test.go index f80c4bb32..90a66653b 100644 --- a/builtin/providers/aws/structure_test.go +++ b/builtin/providers/aws/structure_test.go @@ -1034,7 +1034,7 @@ func TestExpandPolicyAttributes(t *testing.T) { } if len(attributes) != 3 { - t.Fatalf("expected number of attributes to be 3, but got %s", len(attributes)) + t.Fatalf("expected number of attributes to be 3, but got %d", len(attributes)) } expected := &elb.PolicyAttribute{ @@ -1084,7 +1084,7 @@ func TestExpandPolicyAttributes_empty(t *testing.T) { } if len(attributes) != 0 { - t.Fatalf("expected number of attributes to be 0, but got %s", len(attributes)) + t.Fatalf("expected number of attributes to be 0, but got %d", len(attributes)) } } From 92d75b263c602d9b57789b21ae8be0a75e1a8516 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Wed, 10 Aug 2016 08:01:17 +0900 Subject: [PATCH 0627/1238] Add ability to set Requests Payer in aws_s3_bucket. (#8065) Any S3 Bucket owner may wish to share data but not incur charges associated with others accessing the data. This commit adds an optional "request_payer" attribute to the aws_s3_bucket resource so that the owner of the S3 bucket can specify who should bear the cost of Amazon S3 data transfer. 
Signed-off-by: Krzysztof Wilczynski --- .../providers/aws/resource_aws_s3_bucket.go | 62 +++++++++- .../aws/resource_aws_s3_bucket_test.go | 108 ++++++++++++++++++ .../providers/aws/r/s3_bucket.html.markdown | 8 +- 3 files changed, 174 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 840b00d09..0978009a9 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -286,8 +286,6 @@ func resourceAwsS3Bucket() *schema.Resource { }, }, - "tags": tagsSchema(), - "force_destroy": &schema.Schema{ Type: schema.TypeBool, Optional: true, @@ -300,6 +298,15 @@ func resourceAwsS3Bucket() *schema.Resource { Computed: true, ValidateFunc: validateS3BucketAccelerationStatus, }, + + "request_payer": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketRequestPayerType, + }, + + "tags": tagsSchema(), }, } } @@ -408,6 +415,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error { } } + if d.HasChange("request_payer") { + if err := resourceAwsS3BucketRequestPayerUpdate(s3conn, d); err != nil { + return err + } + } + return resourceAwsS3BucketRead(d, meta) } @@ -568,6 +581,20 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { d.Set("acceleration_status", accelerate.Status) } + // Read the request payer configuration. + payer, err := s3conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ + Bucket: aws.String(d.Id()), + }) + if err != nil { + return err + } + log.Printf("[DEBUG] S3 Bucket: %s, read request payer: %v", d.Id(), payer) + if payer.Payer != nil { + if err := d.Set("request_payer", *payer.Payer); err != nil { + return err + } + } + // Read the logging configuration logging, err := s3conn.GetBucketLogging(&s3.GetBucketLoggingInput{ Bucket: aws.String(d.Id()), @@ -575,6 +602,7 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { if err != nil { return err } + log.Printf("[DEBUG] S3 Bucket: %s, logging: %v", d.Id(), logging) if v := logging.LoggingEnabled; v != nil { lcl := make([]map[string]interface{}, 0, 1) @@ -1163,6 +1191,26 @@ func resourceAwsS3BucketAccelerationUpdate(s3conn *s3.S3, d *schema.ResourceData return nil } +func resourceAwsS3BucketRequestPayerUpdate(s3conn *s3.S3, d *schema.ResourceData) error { + bucket := d.Get("bucket").(string) + payer := d.Get("request_payer").(string) + + i := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String(bucket), + RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ + Payer: aws.String(payer), + }, + } + log.Printf("[DEBUG] S3 put bucket request payer: %#v", i) + + _, err := s3conn.PutBucketRequestPayment(i) + if err != nil { + return fmt.Errorf("Error putting S3 request payer: %s", err) + } + + return nil +} + func resourceAwsS3BucketLifecycleUpdate(s3conn *s3.S3, d *schema.ResourceData) error { bucket := d.Get("bucket").(string) @@ -1370,6 +1418,16 @@ func validateS3BucketAccelerationStatus(v interface{}, k string) (ws []string, e return } +func validateS3BucketRequestPayerType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != s3.PayerRequester && value != s3.PayerBucketOwner { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Request Payer type %q. 
Valid types are either %q or %q", + k, value, s3.PayerRequester, s3.PayerBucketOwner)) + } + return +} + func expirationHash(v interface{}) int { var buf bytes.Buffer m := v.(map[string]interface{}) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go index 28dd80f12..25f64479a 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go @@ -77,6 +77,72 @@ func TestAccAWSS3Bucket_acceleration(t *testing.T) { }) } +func TestAccAWSS3Bucket_RequestPayer(t *testing.T) { + rInt := acctest.RandInt() + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketConfigRequestPayerBucketOwner(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), + resource.TestCheckResourceAttr( + "aws_s3_bucket.bucket", + "request_payer", + "BucketOwner"), + testAccCheckAWSS3RequestPayer( + "aws_s3_bucket.bucket", + "BucketOwner"), + ), + }, + resource.TestStep{ + Config: testAccAWSS3BucketConfigRequestPayerRequester(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), + resource.TestCheckResourceAttr( + "aws_s3_bucket.bucket", + "request_payer", + "Requester"), + testAccCheckAWSS3RequestPayer( + "aws_s3_bucket.bucket", + "Requester"), + ), + }, + }, + }) +} + +func TestResourceAWSS3BucketRequestPayer_validation(t *testing.T) { + _, errors := validateS3BucketRequestPayerType("incorrect", "request_payer") + if len(errors) == 0 { + t.Fatalf("Expected to trigger a validation error") + } + + var testCases = []struct { + Value string + ErrCount int + }{ + { + Value: "Requester", + ErrCount: 0, + }, + { + Value: "BucketOwner", + ErrCount: 0, + }, + } + + for _, tc := range testCases { + _, errors := validateS3BucketRequestPayerType(tc.Value, "request_payer") + if len(errors) != tc.ErrCount { + t.Fatalf("Expected not to trigger a validation error") + } + } +} + func TestAccAWSS3Bucket_Policy(t *testing.T) { rInt := acctest.RandInt() @@ -689,6 +755,28 @@ func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.Te } } +func testAccCheckAWSS3RequestPayer(n, expectedPayer string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := s.RootModule().Resources[n] + conn := testAccProvider.Meta().(*AWSClient).s3conn + + out, err := conn.GetBucketRequestPayment(&s3.GetBucketRequestPaymentInput{ + Bucket: aws.String(rs.Primary.ID), + }) + + if err != nil { + return fmt.Errorf("GetBucketRequestPayment error: %v", err) + } + + if *out.Payer != expectedPayer { + return fmt.Errorf("bad error request payer type, expected: %v, got %v", + expectedPayer, out.Payer) + } + + return nil + } +} + func testAccCheckAWSS3BucketLogging(n, b, p string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, _ := s.RootModule().Resources[n] @@ -844,6 +932,26 @@ resource "aws_s3_bucket" "bucket" { `, randInt) } +func testAccAWSS3BucketConfigRequestPayerBucketOwner(randInt int) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket = "tf-test-bucket-%d" + acl = "public-read" + request_payer = "BucketOwner" +} +`, randInt) +} + +func testAccAWSS3BucketConfigRequestPayerRequester(randInt int) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "bucket" { + bucket 
= "tf-test-bucket-%d" + acl = "public-read" + request_payer = "Requester" +} +`, randInt) +} + func testAccAWSS3BucketConfigWithPolicy(randInt int) string { return fmt.Sprintf(` resource "aws_s3_bucket" "bucket" { diff --git a/website/source/docs/providers/aws/r/s3_bucket.html.markdown b/website/source/docs/providers/aws/r/s3_bucket.html.markdown index 821a48192..24cf910b3 100644 --- a/website/source/docs/providers/aws/r/s3_bucket.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket.html.markdown @@ -173,8 +173,12 @@ The following arguments are supported: * `logging` - (Optional) A settings of [bucket logging](https://docs.aws.amazon.com/AmazonS3/latest/UG/ManagingBucketLogging.html) (documented below). * `lifecycle_rule` - (Optional) A configuration of [object lifecycle management](http://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html) (documented below). * `acceleration_status` - (Optional) Sets the accelerate configuration of an existing bucket. Can be `Enabled` or `Suspended`. +* `request_payer` - (Optional) Specifies who should bear the cost of Amazon S3 data transfer. +Can be either `BucketOwner` or `Requester`. By default, the owner of the S3 bucket would incur +the costs of any data transfer. See [Requester Pays Buckets](http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html) +developer guide for more information. -~> **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1` +~> **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1` The `website` object supports the following: @@ -218,7 +222,7 @@ The `expiration` object supports the following * `date` (Optional) Specifies the date after which you want the corresponding action to take effect. * `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect. -* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. +* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers. 
The `transition` object supports the following From 8925235ef16f3d4ac161ba83ad8c576275138284 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 10 Aug 2016 11:06:01 +1200 Subject: [PATCH 0628/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50fed8f54..927fdf574 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ IMPROVEMENTS * provider/aws: Add support for Elasticsearch destination to firehose delivery streams [GH-7839] * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` [GH-7891] * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` [GH-8038] + * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From 41c23b2f04acc2c2c1d3b183caf8c64d4b1698e1 Mon Sep 17 00:00:00 2001 From: Chris Marchesi Date: Tue, 9 Aug 2016 17:06:38 -0700 Subject: [PATCH 0629/1238] provider/aws: Various IAM policy normalizations for IAM data source (#6956) * Various string slices are sorted and truncated to strings if they only contain one element. * Sids are now included if they are empty. This is to ensure what is sent to AWS matches what comes back, to prevent recurring diffs even when the policy has changed. --- .../data_source_aws_iam_policy_document.go | 17 ++++-- ...ata_source_aws_iam_policy_document_test.go | 45 +++++---------- builtin/providers/aws/iam_policy_model.go | 55 +++++++++++++------ 3 files changed, 65 insertions(+), 52 deletions(-) diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document.go b/builtin/providers/aws/data_source_aws_iam_policy_document.go index 8d5051f77..5bea111ee 100644 --- a/builtin/providers/aws/data_source_aws_iam_policy_document.go +++ b/builtin/providers/aws/data_source_aws_iam_policy_document.go @@ -150,12 +150,19 @@ func dataSourceAwsIamPolicyDocumentRead(d *schema.ResourceData, meta interface{} return nil } -func dataSourceAwsIamPolicyDocumentReplaceVarsInList(in []string) []string { - out := make([]string, len(in)) - for i, item := range in { - out[i] = dataSourceAwsIamPolicyDocumentVarReplacer.Replace(item) +func dataSourceAwsIamPolicyDocumentReplaceVarsInList(in interface{}) interface{} { + switch v := in.(type) { + case string: + return dataSourceAwsIamPolicyDocumentVarReplacer.Replace(v) + case []string: + out := make([]string, len(v)) + for i, item := range v { + out[i] = dataSourceAwsIamPolicyDocumentVarReplacer.Replace(item) + } + return out + default: + panic("dataSourceAwsIamPolicyDocumentReplaceVarsInList: input not string nor []string") } - return out } func dataSourceAwsIamPolicyDocumentMakeConditions(in []interface{}) IAMPolicyStatementConditionSet { diff --git a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go b/builtin/providers/aws/data_source_aws_iam_policy_document_test.go index 8a2210265..a50a8ae29 100644 --- a/builtin/providers/aws/data_source_aws_iam_policy_document_test.go +++ b/builtin/providers/aws/data_source_aws_iam_policy_document_test.go @@ -75,7 +75,6 @@ data "aws_iam_policy_document" "test" { test = "StringLike" variable = "s3:prefix" values = [ - "", "home/", "home/&{aws:username}/", ] @@ -118,59 +117,45 @@ var testAccAWSIAMPolicyDocumentExpectedJSON = `{ "Sid": "1", "Effect": "Allow", "Action": [ - 
"s3:GetBucketLocation", - "s3:ListAllMyBuckets" + "s3:ListAllMyBuckets", + "s3:GetBucketLocation" ], - "Resource": [ - "arn:aws:s3:::*" - ] + "Resource": "arn:aws:s3:::*" }, { + "Sid": "", "Effect": "Allow", - "Action": [ - "s3:ListBucket" - ], - "Resource": [ - "arn:aws:s3:::foo" - ], + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::foo", "NotPrincipal": { - "AWS": [ - "arn:blahblah:example" - ] + "AWS": "arn:blahblah:example" }, "Condition": { "StringLike": { "s3:prefix": [ - "", - "home/", - "home/${aws:username}/" + "home/${aws:username}/", + "home/" ] } } }, { + "Sid": "", "Effect": "Allow", - "Action": [ - "s3:*" - ], + "Action": "s3:*", "Resource": [ "arn:aws:s3:::foo/home/${aws:username}/*", "arn:aws:s3:::foo/home/${aws:username}" ], "Principal": { - "AWS": [ - "arn:blahblah:example" - ] + "AWS": "arn:blahblah:example" } }, { + "Sid": "", "Effect": "Deny", - "NotAction": [ - "s3:*" - ], - "NotResource": [ - "arn:aws:s3:::*" - ] + "NotAction": "s3:*", + "NotResource": "arn:aws:s3:::*" } ] }` diff --git a/builtin/providers/aws/iam_policy_model.go b/builtin/providers/aws/iam_policy_model.go index 56ffc9d5c..59192fbf1 100644 --- a/builtin/providers/aws/iam_policy_model.go +++ b/builtin/providers/aws/iam_policy_model.go @@ -2,6 +2,7 @@ package aws import ( "encoding/json" + "sort" ) type IAMPolicyDoc struct { @@ -11,12 +12,12 @@ type IAMPolicyDoc struct { } type IAMPolicyStatement struct { - Sid string `json:",omitempty"` + Sid string Effect string `json:",omitempty"` - Actions []string `json:"Action,omitempty"` - NotActions []string `json:"NotAction,omitempty"` - Resources []string `json:"Resource,omitempty"` - NotResources []string `json:"NotResource,omitempty"` + Actions interface{} `json:"Action,omitempty"` + NotActions interface{} `json:"NotAction,omitempty"` + Resources interface{} `json:"Resource,omitempty"` + NotResources interface{} `json:"NotResource,omitempty"` Principals IAMPolicyStatementPrincipalSet `json:"Principal,omitempty"` NotPrincipals IAMPolicyStatementPrincipalSet `json:"NotPrincipal,omitempty"` Conditions IAMPolicyStatementConditionSet `json:"Condition,omitempty"` @@ -24,51 +25,71 @@ type IAMPolicyStatement struct { type IAMPolicyStatementPrincipal struct { Type string - Identifiers []string + Identifiers interface{} } type IAMPolicyStatementCondition struct { Test string Variable string - Values []string + Values interface{} } type IAMPolicyStatementPrincipalSet []IAMPolicyStatementPrincipal type IAMPolicyStatementConditionSet []IAMPolicyStatementCondition func (ps IAMPolicyStatementPrincipalSet) MarshalJSON() ([]byte, error) { - raw := map[string][]string{} + raw := map[string]interface{}{} for _, p := range ps { - if _, ok := raw[p.Type]; !ok { - raw[p.Type] = make([]string, 0, len(p.Identifiers)) + switch i := p.Identifiers.(type) { + case []string: + if _, ok := raw[p.Type]; !ok { + raw[p.Type] = make([]string, 0, len(i)) + } + sort.Sort(sort.Reverse(sort.StringSlice(i))) + raw[p.Type] = append(raw[p.Type].([]string), i...) + case string: + raw[p.Type] = i + default: + panic("Unsupported data type for IAMPolicyStatementPrincipalSet") } - raw[p.Type] = append(raw[p.Type], p.Identifiers...) 
} return json.Marshal(&raw) } func (cs IAMPolicyStatementConditionSet) MarshalJSON() ([]byte, error) { - raw := map[string]map[string][]string{} + raw := map[string]map[string]interface{}{} for _, c := range cs { if _, ok := raw[c.Test]; !ok { - raw[c.Test] = map[string][]string{} + raw[c.Test] = map[string]interface{}{} } - if _, ok := raw[c.Test][c.Variable]; !ok { - raw[c.Test][c.Variable] = make([]string, 0, len(c.Values)) + switch i := c.Values.(type) { + case []string: + if _, ok := raw[c.Test][c.Variable]; !ok { + raw[c.Test][c.Variable] = make([]string, 0, len(i)) + } + sort.Sort(sort.Reverse(sort.StringSlice(i))) + raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable].([]string), i...) + case string: + raw[c.Test][c.Variable] = i + default: + panic("Unsupported data type for IAMPolicyStatementConditionSet") } - raw[c.Test][c.Variable] = append(raw[c.Test][c.Variable], c.Values...) } return json.Marshal(&raw) } -func iamPolicyDecodeConfigStringList(lI []interface{}) []string { +func iamPolicyDecodeConfigStringList(lI []interface{}) interface{} { + if len(lI) == 1 { + return lI[0].(string) + } ret := make([]string, len(lI)) for i, vI := range lI { ret[i] = vI.(string) } + sort.Sort(sort.Reverse(sort.StringSlice(ret))) return ret } From 25f1c6dc02340c1852770d3fba5108aad48fef3e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 10 Aug 2016 12:07:46 +1200 Subject: [PATCH 0630/1238] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 927fdf574..5fd893998 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,8 +33,9 @@ BUG FIXES: * provider/aws: Retry association of IAM Role & instance profile [GH-7938] * provider/aws: Fix `aws_s3_bucket` resource `redirect_all_requests_to` action [GH-7883] * provider/aws: Fix issue updating ElasticBeanstalk Environment Settings [GH-7777] - * providers/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes [GH-8052] + * provider/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes [GH-8052] * provider/aws: Fix line ending errors/diffs with IAM Server Certs [GH-8074] + * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs [GH-6956] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From 6c2949fdac4f4192840ae588a0148863d289d46c Mon Sep 17 00:00:00 2001 From: Kraig Amador Date: Tue, 9 Aug 2016 17:16:59 -0700 Subject: [PATCH 0631/1238] Added aws_iam_role import. Now that we read the assume_role_policy it highlights all of the tests that change this, so I've fixed a bunch of those while i'm in here. 
(#7617) --- .../providers/aws/import_aws_iam_role_test.go | 28 +++++++ .../resource_aws_iam_instance_profile_test.go | 8 +- ...resource_aws_iam_policy_attachment_test.go | 75 ++----------------- .../providers/aws/resource_aws_iam_role.go | 9 +++ ...rce_aws_iam_role_policy_attachment_test.go | 36 +-------- .../aws/resource_aws_iam_role_policy_test.go | 30 ++++---- .../aws/resource_aws_iam_role_test.go | 45 ++--------- 7 files changed, 76 insertions(+), 155 deletions(-) create mode 100644 builtin/providers/aws/import_aws_iam_role_test.go diff --git a/builtin/providers/aws/import_aws_iam_role_test.go b/builtin/providers/aws/import_aws_iam_role_test.go new file mode 100644 index 000000000..f46cedd56 --- /dev/null +++ b/builtin/providers/aws/import_aws_iam_role_test.go @@ -0,0 +1,28 @@ +package aws + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAWSRole_importBasic(t *testing.T) { + resourceName := "aws_iam_role.role" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSRoleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSRoleConfig, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go b/builtin/providers/aws/resource_aws_iam_instance_profile_test.go index 93001184b..049ccecae 100644 --- a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go +++ b/builtin/providers/aws/resource_aws_iam_instance_profile_test.go @@ -120,8 +120,8 @@ func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileO const testAccAwsIamInstanceProfileConfig = ` resource "aws_iam_role" "test" { - name = "test" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" + name = "test" + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" } resource "aws_iam_instance_profile" "test" { @@ -132,8 +132,8 @@ resource "aws_iam_instance_profile" "test" { const testAccAWSInstanceProfilePrefixNameConfig = ` resource "aws_iam_role" "test" { - name = "test" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" + name = "test" + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" } resource "aws_iam_instance_profile" "test" { diff --git a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go b/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go index 11e50b0d9..446f38ef6 100644 --- a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go +++ b/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go @@ -113,22 +113,8 @@ resource "aws_iam_user" "user" { name = "test-user" } resource "aws_iam_role" "role" { - name = "test-role" - assume_role_policy = < Date: Thu, 4 Aug 2016 01:52:21 +0100 Subject: [PATCH 0632/1238] Update AMI ID on documentation --- website/source/intro/getting-started/change.html.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/website/source/intro/getting-started/change.html.md b/website/source/intro/getting-started/change.html.md index 0ab6d500f..20fb9ce84 100644 --- a/website/source/intro/getting-started/change.html.md +++ b/website/source/intro/getting-started/change.html.md @@ -33,7 +33,7 @@ resource "aws_instance" "example" { } ``` -~> **Note:** EC2 Classic users please use AMI `ami-2106ed4c` and type `t1.micro` +~> **Note:** EC2 Classic users please use AMI `ami-656be372` and type `t1.micro` We've changed the AMI from being an Ubuntu 14.04 LTS AMI to being an Ubuntu 16.04 LTS AMI. Terraform configurations are meant to be From a82f96f93982050f08ce35986cdf3c82714b5eb4 Mon Sep 17 00:00:00 2001 From: f440 Date: Wed, 10 Aug 2016 10:16:31 +0900 Subject: [PATCH 0633/1238] Fix invalid markdown syntax (#8089) --- website/source/docs/providers/powerdns/r/record.html.markdown | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/source/docs/providers/powerdns/r/record.html.markdown b/website/source/docs/providers/powerdns/r/record.html.markdown index 8d9502604..6fb4ab292 100644 --- a/website/source/docs/providers/powerdns/r/record.html.markdown +++ b/website/source/docs/providers/powerdns/r/record.html.markdown @@ -15,6 +15,7 @@ Provides a PowerDNS record resource. Note that PowerDNS internally lowercases certain records (e.g. CNAME and AAAA), which can lead to resources being marked for a change in every singe plan. For the v1 API (PowerDNS version 4): + ``` # Add a record to the zone resource "powerdns_record" "foobar" { @@ -27,6 +28,7 @@ resource "powerdns_record" "foobar" { ``` For the legacy API (PowerDNS version 3.4): + ``` # Add a record to the zone resource "powerdns_record" "foobar" { From f5b46b80e7f5c7f4397b909609350e3ae9b9e705 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Wed, 10 Aug 2016 13:05:39 +0900 Subject: [PATCH 0634/1238] Add ability to set canned ACL in aws_s3_bucket_object. (#8091) An S3 Bucket owner may wish to set a canned ACL (as opposite to explicitly set grantees, etc.) for an object. This commit adds an optional "acl" attribute to the aws_s3_bucket_object resource so that the owner of the S3 bucket can specify an appropriate pre-defined ACL to use when creating an object. 
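As an illustrative sketch (bucket and key names are placeholders), the new
attribute accepts one of the S3 canned ACLs and defaults to "private" when
omitted:

    resource "aws_s3_bucket_object" "example" {
      bucket  = "my-example-bucket"
      key     = "index.html"
      content = "some content"
      acl     = "public-read"
    }
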
Signed-off-by: Krzysztof Wilczynski --- .../aws/resource_aws_s3_bucket_object.go | 46 +++++++ .../aws/resource_aws_s3_bucket_object_test.go | 114 ++++++++++++++++++ .../aws/r/s3_bucket_object.html.markdown | 5 +- 3 files changed, 163 insertions(+), 2 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index c7ae47d75..2df9d5da0 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -6,6 +6,7 @@ import ( "io" "log" "os" + "sort" "strings" "github.com/hashicorp/terraform/helper/schema" @@ -30,6 +31,13 @@ func resourceAwsS3BucketObject() *schema.Resource { ForceNew: true, }, + "acl": &schema.Schema{ + Type: schema.TypeString, + Default: "private", + Optional: true, + ValidateFunc: validateS3BucketObjectAclType, + }, + "cache_control": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -101,6 +109,7 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro bucket := d.Get("bucket").(string) key := d.Get("key").(string) + acl := d.Get("acl").(string) var body io.ReadSeeker if v, ok := d.GetOk("source"); ok { @@ -131,6 +140,7 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro putInput := &s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), + ACL: aws.String(acl), Body: body, } @@ -251,3 +261,39 @@ func resourceAwsS3BucketObjectDelete(d *schema.ResourceData, meta interface{}) e return nil } + +func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + cannedAcls := map[string]bool{ + s3.ObjectCannedACLPrivate: true, + s3.ObjectCannedACLPublicRead: true, + s3.ObjectCannedACLPublicReadWrite: true, + s3.ObjectCannedACLAuthenticatedRead: true, + s3.ObjectCannedACLAwsExecRead: true, + s3.ObjectCannedACLBucketOwnerRead: true, + s3.ObjectCannedACLBucketOwnerFullControl: true, + } + + sentenceJoin := func(m map[string]bool) string { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, fmt.Sprintf("%q", k)) + } + sort.Strings(keys) + + length := len(keys) + words := make([]string, length) + copy(words, keys) + + words[length-1] = fmt.Sprintf("or %s", words[length-1]) + return strings.Join(words, ", ") + } + + if _, ok := cannedAcls[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid canned ACL type %q. 
Valid types are either %s", + k, value, sentenceJoin(cannedAcls))) + } + return +} diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index 63ccf6861..d88b3c99d 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -4,6 +4,8 @@ import ( "fmt" "io/ioutil" "os" + "reflect" + "sort" "testing" "github.com/hashicorp/terraform/helper/acctest" @@ -265,6 +267,104 @@ func TestAccAWSS3BucketObject_kms(t *testing.T) { }) } +func TestAccAWSS3BucketObject_acl(t *testing.T) { + rInt := acctest.RandInt() + var obj s3.GetObjectOutput + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfig_acl(rInt, "private"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists( + "aws_s3_bucket_object.object", &obj), + resource.TestCheckResourceAttr( + "aws_s3_bucket_object.object", + "acl", + "private"), + testAccCheckAWSS3BucketObjectAcl( + "aws_s3_bucket_object.object", + []string{"FULL_CONTROL"}), + ), + }, + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfig_acl(rInt, "public-read"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists( + "aws_s3_bucket_object.object", + &obj), + resource.TestCheckResourceAttr( + "aws_s3_bucket_object.object", + "acl", + "public-read"), + testAccCheckAWSS3BucketObjectAcl( + "aws_s3_bucket_object.object", + []string{"FULL_CONTROL", "READ"}), + ), + }, + }, + }) +} + +func TestResourceAWSS3BucketObjectAcl_validation(t *testing.T) { + _, errors := validateS3BucketObjectAclType("incorrect", "acl") + if len(errors) == 0 { + t.Fatalf("Expected to trigger a validation error") + } + + var testCases = []struct { + Value string + ErrCount int + }{ + { + Value: "public-read", + ErrCount: 0, + }, + { + Value: "public-read-write", + ErrCount: 0, + }, + } + + for _, tc := range testCases { + _, errors := validateS3BucketObjectAclType(tc.Value, "acl") + if len(errors) != tc.ErrCount { + t.Fatalf("Expected not to trigger a validation error") + } + } +} + +func testAccCheckAWSS3BucketObjectAcl(n string, expectedPerms []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := s.RootModule().Resources[n] + s3conn := testAccProvider.Meta().(*AWSClient).s3conn + + out, err := s3conn.GetObjectAcl(&s3.GetObjectAclInput{ + Bucket: aws.String(rs.Primary.Attributes["bucket"]), + Key: aws.String(rs.Primary.Attributes["key"]), + }) + + if err != nil { + return fmt.Errorf("GetObjectAcl error: %v", err) + } + + var perms []string + for _, v := range out.Grants { + perms = append(perms, *v.Permission) + } + sort.Strings(perms) + + if !reflect.DeepEqual(perms, expectedPerms) { + return fmt.Errorf("Expected ACL permissions to be %v, got %v", expectedPerms, perms) + } + + return nil + } +} + func testAccAWSS3BucketObjectConfigSource(randInt int, source string) string { return fmt.Sprintf(` resource "aws_s3_bucket" "object_bucket" { @@ -358,3 +458,17 @@ resource "aws_s3_bucket_object" "object" { } `, randInt) } + +func testAccAWSS3BucketObjectConfig_acl(randInt int, acl string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} +resource "aws_s3_bucket_object" "object" { + bucket = 
"${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "some_bucket_content" + acl = "%s" +} +`, randInt, acl) +} diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index c34997c08..fc7f95b53 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -52,14 +52,15 @@ The following arguments are supported: * `key` - (Required) The name of the object once it is in the bucket. * `source` - (Required) The path to the source file being uploaded to the bucket. * `content` - (Required unless `source` given) The literal content being uploaded to the bucket. +* `acl` - (Optional) The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Defaults to "private". * `cache_control` - (Optional) Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details. * `content_disposition` - (Optional) Specifies presentational information for the object. Read [wc3 content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information. * `content_encoding` - (Optional) Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information. * `content_language` - (Optional) The language the content is in e.g. en-US or en-GB. * `content_type` - (Optional) A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input. -* `etag` - (Optional) Used to trigger updates. The only meaningful value is `${md5(file("path/to/file"))}`. +* `etag` - (Optional) Used to trigger updates. The only meaningful value is `${md5(file("path/to/file"))}`. This attribute is not compatible with `kms_key_id` -* `kms_key_id` - (Optional) Specifies the AWS KMS Key ID to use for object encryption. +* `kms_key_id` - (Optional) Specifies the AWS KMS Key ID to use for object encryption. This value is a fully qualified **ARN** of the KMS Key. 
If using `aws_kms_key`, use the exported `arn` attribute: `kms_key_id = "${aws_kms_key.foo.arn}"` From 6b477e888ac20f2167a5434bc75df6dedb7b6b9e Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Wed, 10 Aug 2016 16:07:32 +1200 Subject: [PATCH 0635/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5fd893998..b2826b55f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ IMPROVEMENTS * provider/aws: Retry AttachInternetGateway and increase timeout on `aws_internet_gateway` [GH-7891] * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` [GH-8038] * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065] + * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From ba42737e262caf58f428508fc255b75381018e22 Mon Sep 17 00:00:00 2001 From: Giovanni Paolo Gibilisco Date: Wed, 10 Aug 2016 12:35:53 +0200 Subject: [PATCH 0636/1238] add acceptance test for issue #8040 --- ...urce_openstack_compute_instance_v2_test.go | 129 ++++++++++++++++++ 1 file changed, 129 insertions(+) diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go index 1cd47bcba..db54e21c4 100644 --- a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go @@ -178,6 +178,97 @@ func TestAccComputeV2Instance_volumeDetachPostCreation(t *testing.T) { }) } +func TestAccComputeV2Instance_additionalVolumeDetachPostCreation(t *testing.T) { + var instance servers.Server + var volume volumes.Volume + + var testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume = fmt.Sprintf(` + + resource "openstack_blockstorage_volume_v1" "root_volume" { + name = "root_volume" + size = 1 + image_id = "%s" + } + + resource "openstack_blockstorage_volume_v1" "additional_volume" { + name = "additional_volume" + size = 1 + } + + resource "openstack_compute_instance_v2" "foo" { + name = "terraform-test" + security_groups = ["default"] + + block_device { + uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" + source_type = "volume" + boot_index = 0 + destination_type = "volume" + delete_on_termination = false + } + + volume { + volume_id = "${openstack_blockstorage_volume_v1.additional_volume.id}" + } + }`, + os.Getenv("OS_IMAGE_ID")) + + var testAccComputeV2Instance_volumeDetachPostCreationInstance = fmt.Sprintf(` + + resource "openstack_blockstorage_volume_v1" "root_volume" { + name = "root_volume" + size = 1 + image_id = "%s" + } + + resource "openstack_blockstorage_volume_v1" "additional_volume" { + name = "additional_volume" + size = 1 + } + + resource "openstack_compute_instance_v2" "foo" { + name = "terraform-test" + security_groups = ["default"] + + block_device { + uuid = "${openstack_blockstorage_volume_v1.root_volume.id}" + source_type = "volume" + boot_index = 0 + destination_type = "volume" + delete_on_termination = false + } + }`, + os.Getenv("OS_IMAGE_ID")) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + 
Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Instance_volumeDetachPostCreationInstanceAndAdditionalVolume, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + ), + }, + resource.TestStep{ + Config: testAccComputeV2Instance_volumeDetachPostCreationInstance, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.root_volume", &volume), + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.additional_volume", &volume), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + testAccCheckComputeV2InstanceVolumeDetached(&instance, "openstack_blockstorage_volume_v1.additional_volume"), + ), + }, + }, + }) +} + func TestAccComputeV2Instance_floatingIPAttachGlobally(t *testing.T) { var instance servers.Server var fip floatingip.FloatingIP @@ -993,3 +1084,41 @@ func TestAccComputeV2Instance_stop_before_destroy(t *testing.T) { }, }) } + +func testAccCheckComputeV2InstanceVolumeDetached(instance *servers.Server, volume_id string) resource.TestCheckFunc { + return func(s *terraform.State) error { + var attachments []volumeattach.VolumeAttachment + + rs, ok := s.RootModule().Resources[volume_id] + if !ok { + return fmt.Errorf("Not found: %s", volume_id) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return err + } + err = volumeattach.List(computeClient, instance.ID).EachPage(func(page pagination.Page) (bool, error) { + actual, err := volumeattach.ExtractVolumeAttachments(page) + if err != nil { + return false, fmt.Errorf("Unable to lookup attachment: %s", err) + } + + attachments = actual + return true, nil + }) + + for _, attachment := range attachments { + if attachment.VolumeID == rs.Primary.ID { + return fmt.Errorf("Volume is still attached.") + } + } + + return nil + } +} From c2bcb5fbe526069e68e4ffb570c70430599ec84d Mon Sep 17 00:00:00 2001 From: Renier Morales Date: Wed, 10 Aug 2016 10:10:34 -0400 Subject: [PATCH 0637/1238] Skip IAM/STS validation and metadata check (#7874) * Skip IAM/STS validation and metadata check * Skip IAM/STS identity validation - For environments or other api implementations where there are no IAM/STS endpoints available, this option lets you opt out from that provider initialization step. * Skip metdata api check - For environments in which you know ahead of time there isn't going to be a metadta api endpoint, this option lets you opt out from that check to save time. 
* Allow iam/sts initialization even if skipping account/cred validation (#7874) * Split out skip of IAM validation into credentials and account id (#7874) --- builtin/providers/aws/auth_helpers.go | 38 +++++++-------- builtin/providers/aws/auth_helpers_test.go | 18 ++++---- builtin/providers/aws/config.go | 45 ++++++++++-------- builtin/providers/aws/provider.go | 54 ++++++++++++++++++---- state/remote/s3.go | 8 +++- 5 files changed, 107 insertions(+), 56 deletions(-) diff --git a/builtin/providers/aws/auth_helpers.go b/builtin/providers/aws/auth_helpers.go index 552a4234f..91671a801 100644 --- a/builtin/providers/aws/auth_helpers.go +++ b/builtin/providers/aws/auth_helpers.go @@ -86,18 +86,18 @@ func parseAccountIdFromArn(arn string) (string, error) { // This function is responsible for reading credentials from the // environment in the case that they're not explicitly specified // in the Terraform configuration. -func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentials.Credentials { +func GetCredentials(c *Config) *awsCredentials.Credentials { // build a chain provider, lazy-evaulated by aws-sdk providers := []awsCredentials.Provider{ &awsCredentials.StaticProvider{Value: awsCredentials.Value{ - AccessKeyID: key, - SecretAccessKey: secret, - SessionToken: token, + AccessKeyID: c.AccessKey, + SecretAccessKey: c.SecretKey, + SessionToken: c.Token, }}, &awsCredentials.EnvProvider{}, &awsCredentials.SharedCredentialsProvider{ - Filename: credsfile, - Profile: profile, + Filename: c.CredsFilename, + Profile: c.Profile, }, } @@ -114,19 +114,21 @@ func GetCredentials(key, secret, token, profile, credsfile string) *awsCredentia // Real AWS should reply to a simple metadata request. // We check it actually does to ensure something else didn't just // happen to be listening on the same IP:Port - metadataClient := ec2metadata.New(session.New(cfg)) - if metadataClient.Available() { - providers = append(providers, &ec2rolecreds.EC2RoleProvider{ - Client: metadataClient, - }) - log.Printf("[INFO] AWS EC2 instance detected via default metadata" + - " API endpoint, EC2RoleProvider added to the auth chain") - } else { - if usedEndpoint == "" { - usedEndpoint = "default location" + if c.SkipMetadataApiCheck == false { + metadataClient := ec2metadata.New(session.New(cfg)) + if metadataClient.Available() { + providers = append(providers, &ec2rolecreds.EC2RoleProvider{ + Client: metadataClient, + }) + log.Printf("[INFO] AWS EC2 instance detected via default metadata" + + " API endpoint, EC2RoleProvider added to the auth chain") + } else { + if usedEndpoint == "" { + usedEndpoint = "default location" + } + log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+ + "as it doesn't return any instance-id", usedEndpoint) } - log.Printf("[WARN] Ignoring AWS metadata API endpoint at %s "+ - "as it doesn't return any instance-id", usedEndpoint) } return awsCredentials.NewChainCredentials(providers) diff --git a/builtin/providers/aws/auth_helpers_test.go b/builtin/providers/aws/auth_helpers_test.go index a9de0fcc6..b5e1699a0 100644 --- a/builtin/providers/aws/auth_helpers_test.go +++ b/builtin/providers/aws/auth_helpers_test.go @@ -218,7 +218,7 @@ func TestAWSGetCredentials_shouldError(t *testing.T) { defer resetEnv() cfg := Config{} - c := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename) + c := GetCredentials(&cfg) _, err := c.Get() if awsErr, ok := err.(awserr.Error); ok { if awsErr.Code() != "NoCredentialProviders" { @@ -251,7 +251,7 @@ func 
TestAWSGetCredentials_shouldBeStatic(t *testing.T) { Token: c.Token, } - creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename) + creds := GetCredentials(&cfg) if creds == nil { t.Fatalf("Expected a static creds provider to be returned") } @@ -286,7 +286,7 @@ func TestAWSGetCredentials_shouldIAM(t *testing.T) { // An empty config, no key supplied cfg := Config{} - creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename) + creds := GetCredentials(&cfg) if creds == nil { t.Fatalf("Expected a static creds provider to be returned") } @@ -335,7 +335,7 @@ func TestAWSGetCredentials_shouldIgnoreIAM(t *testing.T) { Token: c.Token, } - creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename) + creds := GetCredentials(&cfg) if creds == nil { t.Fatalf("Expected a static creds provider to be returned") } @@ -362,7 +362,7 @@ func TestAWSGetCredentials_shouldErrorWithInvalidEndpoint(t *testing.T) { ts := invalidAwsEnv(t) defer ts() - creds := GetCredentials("", "", "", "", "") + creds := GetCredentials(&Config{}) v, err := creds.Get() if err == nil { t.Fatal("Expected error returned when getting creds w/ invalid EC2 endpoint") @@ -380,7 +380,7 @@ func TestAWSGetCredentials_shouldIgnoreInvalidEndpoint(t *testing.T) { ts := invalidAwsEnv(t) defer ts() - creds := GetCredentials("accessKey", "secretKey", "", "", "") + creds := GetCredentials(&Config{AccessKey: "accessKey", SecretKey: "secretKey"}) v, err := creds.Get() if err != nil { t.Fatalf("Getting static credentials w/ invalid EC2 endpoint failed: %s", err) @@ -406,7 +406,7 @@ func TestAWSGetCredentials_shouldCatchEC2RoleProvider(t *testing.T) { ts := awsEnv(t) defer ts() - creds := GetCredentials("", "", "", "", "") + creds := GetCredentials(&Config{}) if creds == nil { t.Fatalf("Expected an EC2Role creds provider to be returned") } @@ -452,7 +452,7 @@ func TestAWSGetCredentials_shouldBeShared(t *testing.T) { t.Fatalf("Error resetting env var AWS_SHARED_CREDENTIALS_FILE: %s", err) } - creds := GetCredentials("", "", "", "myprofile", file.Name()) + creds := GetCredentials(&Config{Profile: "myprofile", CredsFilename: file.Name()}) if creds == nil { t.Fatalf("Expected a provider chain to be returned") } @@ -479,7 +479,7 @@ func TestAWSGetCredentials_shouldBeENV(t *testing.T) { defer resetEnv() cfg := Config{} - creds := GetCredentials(cfg.AccessKey, cfg.SecretKey, cfg.Token, cfg.Profile, cfg.CredsFilename) + creds := GetCredentials(&cfg) if creds == nil { t.Fatalf("Expected a static creds provider to be returned") } diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 434bdffdd..84a7b5d26 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -70,12 +70,16 @@ type Config struct { AllowedAccountIds []interface{} ForbiddenAccountIds []interface{} - DynamoDBEndpoint string - KinesisEndpoint string - Ec2Endpoint string - IamEndpoint string - ElbEndpoint string - Insecure bool + DynamoDBEndpoint string + KinesisEndpoint string + Ec2Endpoint string + IamEndpoint string + ElbEndpoint string + S3Endpoint string + Insecure bool + SkipIamCredsValidation bool + SkipIamAccountId bool + SkipMetadataApiCheck bool } type AWSClient struct { @@ -141,7 +145,7 @@ func (c *Config) Client() (interface{}, error) { client.region = c.Region log.Println("[INFO] Building AWS auth structure") - creds := GetCredentials(c.AccessKey, c.SecretKey, c.Token, c.Profile, c.CredsFilename) + creds := 
GetCredentials(c) // Call Get to check for credential provider. If nothing found, we'll get an // error, and we can present it nicely to the user cp, err := creds.Get() @@ -199,19 +203,24 @@ func (c *Config) Client() (interface{}, error) { client.iamconn = iam.New(awsIamSess) client.stsconn = sts.New(sess) - err = c.ValidateCredentials(client.stsconn) - if err != nil { - errs = append(errs, err) - return nil, &multierror.Error{Errors: errs} - } - accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName) - if err == nil { - client.accountid = accountId + if c.SkipIamCredsValidation == false { + err = c.ValidateCredentials(client.stsconn) + if err != nil { + errs = append(errs, err) + return nil, &multierror.Error{Errors: errs} + } } - authErr := c.ValidateAccountId(client.accountid) - if authErr != nil { - errs = append(errs, authErr) + if c.SkipIamAccountId == false { + accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName) + if err == nil { + client.accountid = accountId + } + + authErr := c.ValidateAccountId(client.accountid) + if authErr != nil { + errs = append(errs, authErr) + } } client.apigateway = apigateway.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index dab42ba87..af041e44e 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -100,6 +100,7 @@ func Provider() terraform.ResourceProvider { Default: "", Description: descriptions["kinesis_endpoint"], }, + "endpoints": endpointsSchema(), "insecure": &schema.Schema{ @@ -108,6 +109,27 @@ func Provider() terraform.ResourceProvider { Default: false, Description: descriptions["insecure"], }, + + "skip_iam_creds_validation": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_iam_creds_validation"], + }, + + "skip_iam_account_id": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_iam_account_id"], + }, + + "skip_metadata_api_check": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["skip_metadata_api_check"], + }, }, DataSourcesMap: map[string]*schema.Resource{ @@ -332,21 +354,33 @@ func init() { "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + "default value is `false`", + + "skip_iam_creds_validation": "Skip the IAM/STS credentials validation. " + + "Used for AWS API implementations that do not use IAM.", + + "skip_iam_account_id": "Skip the request of account id to IAM/STS. " + + "Used for AWS API implementations that do not use IAM.", + + "skip_medatadata_api_check": "Skip the AWS Metadata API check. 
" + + "Used for AWS API implementations that do not have a metadata api endpoint.", } } func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ - AccessKey: d.Get("access_key").(string), - SecretKey: d.Get("secret_key").(string), - Profile: d.Get("profile").(string), - CredsFilename: d.Get("shared_credentials_file").(string), - Token: d.Get("token").(string), - Region: d.Get("region").(string), - MaxRetries: d.Get("max_retries").(int), - DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string), - KinesisEndpoint: d.Get("kinesis_endpoint").(string), - Insecure: d.Get("insecure").(bool), + AccessKey: d.Get("access_key").(string), + SecretKey: d.Get("secret_key").(string), + Profile: d.Get("profile").(string), + CredsFilename: d.Get("shared_credentials_file").(string), + Token: d.Get("token").(string), + Region: d.Get("region").(string), + MaxRetries: d.Get("max_retries").(int), + DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string), + KinesisEndpoint: d.Get("kinesis_endpoint").(string), + Insecure: d.Get("insecure").(bool), + SkipIamCredsValidation: d.Get("skip_iam_creds_validation").(bool), + SkipIamAccountId: d.Get("skip_iam_account_id").(bool), + SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), } endpointsSet := d.Get("endpoints").(*schema.Set) diff --git a/state/remote/s3.go b/state/remote/s3.go index 230df5fce..026e50a11 100644 --- a/state/remote/s3.go +++ b/state/remote/s3.go @@ -60,7 +60,13 @@ func s3Factory(conf map[string]string) (Client, error) { kmsKeyID := conf["kms_key_id"] var errs []error - creds := terraformAws.GetCredentials(conf["access_key"], conf["secret_key"], conf["token"], conf["profile"], conf["shared_credentials_file"]) + creds := terraformAws.GetCredentials(&terraformAws.Config{ + AccessKey: conf["access_key"], + SecretKey: conf["secret_key"], + Token: conf["token"], + Profile: conf["profile"], + CredsFilename: conf["shared_credentials_file"], + }) // Call Get to check for credential provider. 
If nothing found, we'll get an // error, and we can present it nicely to the user _, err := creds.Get() From b98e5f2073cab4cdf6774f63ce966d22357456a4 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Aug 2016 15:12:47 +0100 Subject: [PATCH 0638/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b2826b55f..245ae7c18 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ IMPROVEMENTS * provider/aws: Add support for Enhanced monitoring to `aws_rds_cluster_instance` [GH-8038] * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065] * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091] + * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check [GH-7874] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From 2073e80c667a6d1202fe163390d0509ec6084ec3 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Aug 2016 15:25:16 +0100 Subject: [PATCH 0639/1238] aws/config: Shortened conditions [cleanup] --- builtin/providers/aws/auth_helpers.go | 8 ++++---- builtin/providers/aws/config.go | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/builtin/providers/aws/auth_helpers.go b/builtin/providers/aws/auth_helpers.go index 91671a801..33db566fd 100644 --- a/builtin/providers/aws/auth_helpers.go +++ b/builtin/providers/aws/auth_helpers.go @@ -111,10 +111,10 @@ func GetCredentials(c *Config) *awsCredentials.Credentials { } usedEndpoint := setOptionalEndpoint(cfg) - // Real AWS should reply to a simple metadata request. - // We check it actually does to ensure something else didn't just - // happen to be listening on the same IP:Port - if c.SkipMetadataApiCheck == false { + if !c.SkipMetadataApiCheck { + // Real AWS should reply to a simple metadata request. + // We check it actually does to ensure something else didn't just + // happen to be listening on the same IP:Port metadataClient := ec2metadata.New(session.New(cfg)) if metadataClient.Available() { providers = append(providers, &ec2rolecreds.EC2RoleProvider{ diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 84a7b5d26..1a7a05c94 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -203,7 +203,7 @@ func (c *Config) Client() (interface{}, error) { client.iamconn = iam.New(awsIamSess) client.stsconn = sts.New(sess) - if c.SkipIamCredsValidation == false { + if !c.SkipIamCredsValidation { err = c.ValidateCredentials(client.stsconn) if err != nil { errs = append(errs, err) @@ -211,7 +211,7 @@ func (c *Config) Client() (interface{}, error) { } } - if c.SkipIamAccountId == false { + if !c.SkipIamAccountId { accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName) if err == nil { client.accountid = accountId From b9eaa23f600325b0dde779a9cda127069ca8a76d Mon Sep 17 00:00:00 2001 From: Gavin Williams Date: Tue, 9 Aug 2016 16:31:22 +0100 Subject: [PATCH 0640/1238] Add support for updating the External Gateway assigned to a Neutron router. Added a simple acceptance test, but doesn't work. 
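A minimal sketch of the intended usage (the UUID below is a placeholder for
an existing external network ID); changing external_gateway on an existing
router should now trigger an in-place update of the router's gateway info:

    resource "openstack_networking_router_v2" "router" {
      name             = "router"
      admin_state_up   = "true"
      external_gateway = "d730db50-0e0c-4790-9972-1f6e2b8c4915"
    }
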
--- ...resource_openstack_networking_router_v2.go | 9 ++++++ ...rce_openstack_networking_router_v2_test.go | 32 +++++++++++++++++++ 2 files changed, 41 insertions(+) diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_v2.go index c6e4982ae..e79c6f1ca 100644 --- a/builtin/providers/openstack/resource_openstack_networking_router_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_router_v2.go @@ -213,6 +213,15 @@ func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) asu := d.Get("admin_state_up").(bool) updateOpts.AdminStateUp = &asu } + if d.HasChange("external_gateway") { + externalGateway := d.Get("external_gateway").(string) + if externalGateway != "" { + gatewayInfo := routers.GatewayInfo{ + NetworkID: externalGateway, + } + updateOpts.GatewayInfo = &gatewayInfo + } + } log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts) diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go index fd0ff0cc7..f6241c34f 100644 --- a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go @@ -34,6 +34,30 @@ func TestAccNetworkingV2Router_basic(t *testing.T) { }) } +func TestAccNetworkingV2Router_update_external_gw(t *testing.T) { + var router routers.Router + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2RouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Router_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.foo", &router), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Router_update_external_gw, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "external_gateway", "d730db50-0e0c-4790-9972-1f6e2b8c4915"), + ), + }, + }, + }) +} + func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) error { config := testAccProvider.Meta().(*Config) networkingClient, err := config.networkingV2Client(OS_REGION_NAME) @@ -100,3 +124,11 @@ var testAccNetworkingV2Router_update = fmt.Sprintf(` admin_state_up = "true" distributed = "false" }`) + +var testAccNetworkingV2Router_update_external_gw = fmt.Sprintf(` + resource "openstack_networking_router_v2" "foo" { + name = "router" + admin_state_up = "true" + distributed = "false" + external_gateway = "d730db50-0e0c-4790-9972-1f6e2b8c4915" + }`) From 1c09918191983971acc48aa57a339daaf84b997d Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Wed, 10 Aug 2016 03:28:25 +0000 Subject: [PATCH 0641/1238] provider/openstack: Fixing acc test for external gw update --- ...rce_openstack_networking_router_v2_test.go | 31 ++++++++++++------- 1 file changed, 20 insertions(+), 11 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go index f6241c34f..07e2e6e81 100644 --- a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go +++ b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go @@ -2,6 +2,7 @@ package openstack import ( "fmt" + "os" "testing" 
"github.com/hashicorp/terraform/helper/resource" @@ -36,6 +37,22 @@ func TestAccNetworkingV2Router_basic(t *testing.T) { func TestAccNetworkingV2Router_update_external_gw(t *testing.T) { var router routers.Router + externalGateway := os.Getenv("OS_EXTGW_ID") + + var testAccNetworkingV2Router_update_external_gw_1 = fmt.Sprintf(` + resource "openstack_networking_router_v2" "foo" { + name = "router" + admin_state_up = "true" + distributed = "false" + }`) + + var testAccNetworkingV2Router_update_external_gw_2 = fmt.Sprintf(` + resource "openstack_networking_router_v2" "foo" { + name = "router" + admin_state_up = "true" + distributed = "false" + external_gateway = "%s" + }`, externalGateway) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, @@ -43,15 +60,15 @@ func TestAccNetworkingV2Router_update_external_gw(t *testing.T) { CheckDestroy: testAccCheckNetworkingV2RouterDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccNetworkingV2Router_basic, + Config: testAccNetworkingV2Router_update_external_gw_1, Check: resource.ComposeTestCheckFunc( testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.foo", &router), ), }, resource.TestStep{ - Config: testAccNetworkingV2Router_update_external_gw, + Config: testAccNetworkingV2Router_update_external_gw_2, Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "external_gateway", "d730db50-0e0c-4790-9972-1f6e2b8c4915"), + resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "external_gateway", externalGateway), ), }, }, @@ -124,11 +141,3 @@ var testAccNetworkingV2Router_update = fmt.Sprintf(` admin_state_up = "true" distributed = "false" }`) - -var testAccNetworkingV2Router_update_external_gw = fmt.Sprintf(` - resource "openstack_networking_router_v2" "foo" { - name = "router" - admin_state_up = "true" - distributed = "false" - external_gateway = "d730db50-0e0c-4790-9972-1f6e2b8c4915" - }`) From 5d9fa90005a9f3936f9006c4897801affcdc9a38 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Wed, 10 Aug 2016 09:04:40 -0600 Subject: [PATCH 0642/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 245ae7c18..14ed945b7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ IMPROVEMENTS * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] + * provider/openstack: Support pdating the External Gateway assigned to a Neutron router [GH-8070] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] From 2e5791ab2b76ac019681969686ec904eb015155f Mon Sep 17 00:00:00 2001 From: James Bardin Date: Tue, 9 Aug 2016 16:14:40 -0400 Subject: [PATCH 0643/1238] Allow the HCL input when prompted We already accept HCL encoded input for -vars, and this expands that to accept HCL when prompted for a value on the command line as well. 
--- terraform/context.go | 67 ++++++++++++++++------- terraform/context_input_test.go | 41 ++++++++++++++ terraform/context_test.go | 29 +++++++++- terraform/terraform_test.go | 11 ++++ terraform/test-fixtures/input-hcl/main.tf | 12 ++++ 5 files changed, 138 insertions(+), 22 deletions(-) create mode 100644 terraform/test-fixtures/input-hcl/main.tf diff --git a/terraform/context.go b/terraform/context.go index 262b7ce38..8f30fd554 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -317,16 +317,18 @@ func (c *Context) Input(mode InputMode) error { } } + var valueType config.VariableType + v := m[n] - switch v.Type() { + switch valueType = v.Type(); valueType { case config.VariableTypeUnknown: continue case config.VariableTypeMap: - continue + // OK case config.VariableTypeList: - continue + // OK case config.VariableTypeString: - // Good! + // OK default: panic(fmt.Sprintf("Unknown variable type: %#v", v.Type())) } @@ -340,6 +342,12 @@ func (c *Context) Input(mode InputMode) error { } } + // this should only happen during tests + if c.uiInput == nil { + log.Println("[WARN] Content.uiInput is nil") + continue + } + // Ask the user for a value for this variable var value string retry := 0 @@ -355,27 +363,33 @@ func (c *Context) Input(mode InputMode) error { "Error asking for %s: %s", n, err) } - if value == "" && v.Required() { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - return fmt.Errorf("missing required value for %q", n) - } - retry++ - continue - } - if value == "" { - // No value, just exit the loop. With no value, we just - // use whatever is currently set in variables. - break + if v.Required() { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + return fmt.Errorf("missing required value for %q", n) + } + retry++ + continue + } } break } - if value != "" { - c.variables[n] = value + // no value provided, so don't set the variable at all + if value == "" { + continue + } + + decoded, err := parseVariableAsHCL(n, value, valueType) + if err != nil { + return err + } + + if decoded != nil { + c.variables[n] = decoded } } } @@ -656,9 +670,20 @@ func (c *Context) walk( // the name of the variable. In order to get around the restriction of HCL requiring a // top level object, we prepend a sentinel key, decode the user-specified value as its // value and pull the value back out of the resulting map. 
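The sentinel-key trick described in the comment above, as a minimal sketch (an assumption for illustration — it presumes the hashicorp/hcl Decode helper and simplifies error handling relative to the real function):

    // Wrap the raw input so HCL sees a top-level object, decode it, then
    // pull the user's value back out from under the sentinel key.
    wrapped := fmt.Sprintf("%s = %s", sentinelValue, input)
    var decoded map[string]interface{}
    if err := hcl.Decode(&decoded, wrapped); err != nil {
            return nil, fmt.Errorf("cannot parse value for variable %s (%q) as valid HCL: %s", name, input, err)
    }
    return decoded[sentinelValue], nil
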
-func parseVariableAsHCL(name string, input interface{}, targetType config.VariableType) (interface{}, error) { +func parseVariableAsHCL(name string, input string, targetType config.VariableType) (interface{}, error) { + // expecting a string so don't decode anything, just strip quotes if targetType == config.VariableTypeString { - return input, nil + return strings.Trim(input, `"`), nil + } + + // return empty types + if strings.TrimSpace(input) == "" { + switch targetType { + case config.VariableTypeList: + return []interface{}{}, nil + case config.VariableTypeMap: + return make(map[string]interface{}), nil + } } const sentinelValue = "SENTINEL_TERRAFORM_VAR_OVERRIDE_KEY" diff --git a/terraform/context_input_test.go b/terraform/context_input_test.go index 13e372469..f5e0f47a2 100644 --- a/terraform/context_input_test.go +++ b/terraform/context_input_test.go @@ -617,3 +617,44 @@ func TestContext2Input_interpolateVar(t *testing.T) { t.Fatalf("err: %s", err) } } + +func TestContext2Input_hcl(t *testing.T) { + input := new(MockUIInput) + m := testModule(t, "input-hcl") + p := testProvider("hcl") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "hcl": testProviderFuncFixed(p), + }, + Variables: map[string]interface{}{}, + UIInput: input, + }) + + input.InputReturnMap = map[string]string{ + "var.listed": `["a", "b"]`, + "var.mapped": `{x = "y", w = "z"}`, + } + + if err := ctx.Input(InputModeVar | InputModeVarUnset); err != nil { + t.Fatalf("err: %s", err) + } + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(testTerraformInputHCL) + if actualStr != expectedStr { + t.Logf("expected: \n%s", expectedStr) + t.Fatalf("bad: \n%s", actualStr) + } +} diff --git a/terraform/context_test.go b/terraform/context_test.go index 6d3f1e57f..0d6da97fe 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -6,6 +6,8 @@ import ( "strings" "testing" "time" + + "github.com/hashicorp/terraform/flatmap" ) func TestNewContextState(t *testing.T) { @@ -172,7 +174,16 @@ func testDiffFn( if reflect.DeepEqual(v, []interface{}{}) { attrDiff.New = "" } else { - attrDiff.New = v.(string) + if s, ok := v.(string); ok { + attrDiff.New = s + } else { + // the value is something other than a string + // flatmap it, adding the diff for each value. 
+ for k, attrDiff := range testFlatAttrDiffs(k, v) { + diff.Attributes[k] = attrDiff + } + continue + } } if k == "require_new" { @@ -219,6 +230,22 @@ func testDiffFn( return diff, nil } +// generate ResourceAttrDiffs for nested data structures in tests +func testFlatAttrDiffs(k string, i interface{}) map[string]*ResourceAttrDiff { + flat := flatmap.Flatten(map[string]interface{}{k: i}) + diffs := make(map[string]*ResourceAttrDiff) + + for k, v := range flat { + attrDiff := &ResourceAttrDiff{ + Old: "", + New: v, + } + diffs[k] = attrDiff + } + + return diffs +} + func testProvider(prefix string) *MockResourceProvider { p := new(MockResourceProvider) p.RefreshFn = func(info *InstanceInfo, s *InstanceState) (*InstanceState, error) { diff --git a/terraform/terraform_test.go b/terraform/terraform_test.go index 7f81bf05a..469d5376d 100644 --- a/terraform/terraform_test.go +++ b/terraform/terraform_test.go @@ -1413,3 +1413,14 @@ module.mod2: STATE: ` + +const testTerraformInputHCL = ` +hcl_instance.hcltest: + ID = foo + bar.w = z + bar.x = y + foo.# = 2 + foo.0 = a + foo.1 = b + type = hcl_instance +` diff --git a/terraform/test-fixtures/input-hcl/main.tf b/terraform/test-fixtures/input-hcl/main.tf new file mode 100644 index 000000000..ca46ee8e9 --- /dev/null +++ b/terraform/test-fixtures/input-hcl/main.tf @@ -0,0 +1,12 @@ +variable "mapped" { + type = "map" +} + +variable "listed" { + type = "list" +} + +resource "hcl_instance" "hcltest" { + foo = "${var.listed}" + bar = "${var.mapped}" +} From d6c8b402011ef2045e8d73e5b2a1bf0ae0e14335 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 10 Aug 2016 11:37:55 -0400 Subject: [PATCH 0644/1238] unify some of the test code Have all the values in the testDiffFn go through the same code path --- terraform/context_plan_test.go | 1 + terraform/context_test.go | 51 +++++++++++++++++----------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 65ba03566..5a5d50ed1 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -1223,6 +1223,7 @@ func TestContext2Plan_countZero(t *testing.T) { actual := strings.TrimSpace(plan.String()) expected := strings.TrimSpace(testTerraformPlanCountZeroStr) if actual != expected { + t.Logf("expected:\n%s", expected) t.Fatalf("bad:\n%s", actual) } } diff --git a/terraform/context_test.go b/terraform/context_test.go index 0d6da97fe..3ec4d9892 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -2,7 +2,6 @@ package terraform import ( "fmt" - "reflect" "strings" "testing" "time" @@ -167,32 +166,15 @@ func testDiffFn( v = c.Config[k] } - attrDiff := &ResourceAttrDiff{ - Old: "", - } - - if reflect.DeepEqual(v, []interface{}{}) { - attrDiff.New = "" - } else { - if s, ok := v.(string); ok { - attrDiff.New = s - } else { - // the value is something other than a string - // flatmap it, adding the diff for each value. 
- for k, attrDiff := range testFlatAttrDiffs(k, v) { - diff.Attributes[k] = attrDiff - } - continue + for k, attrDiff := range testFlatAttrDiffs(k, v) { + if k == "require_new" { + attrDiff.RequiresNew = true } + if _, ok := c.Raw["__"+k+"_requires_new"]; ok { + attrDiff.RequiresNew = true + } + diff.Attributes[k] = attrDiff } - - if k == "require_new" { - attrDiff.RequiresNew = true - } - if _, ok := c.Raw["__"+k+"_requires_new"]; ok { - attrDiff.RequiresNew = true - } - diff.Attributes[k] = attrDiff } for _, k := range c.ComputedKeys { @@ -232,8 +214,25 @@ func testDiffFn( // generate ResourceAttrDiffs for nested data structures in tests func testFlatAttrDiffs(k string, i interface{}) map[string]*ResourceAttrDiff { - flat := flatmap.Flatten(map[string]interface{}{k: i}) diffs := make(map[string]*ResourceAttrDiff) + // check for strings and empty containers first + switch t := i.(type) { + case string: + diffs[k] = &ResourceAttrDiff{New: t} + return diffs + case map[string]interface{}: + if len(t) == 0 { + diffs[k] = &ResourceAttrDiff{New: ""} + return diffs + } + case []interface{}: + if len(t) == 0 { + diffs[k] = &ResourceAttrDiff{New: ""} + return diffs + } + } + + flat := flatmap.Flatten(map[string]interface{}{k: i}) for k, v := range flat { attrDiff := &ResourceAttrDiff{ From 0ab3bc4105a70ea34586506778fb86e7b503f0ce Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Aug 2016 16:46:02 +0100 Subject: [PATCH 0645/1238] aws: Change field names + desc according to reality - skip_iam_creds_validation => skip_credentials_validation - skip_iam_account_id => skip_requesting_account_id --- builtin/providers/aws/config.go | 25 +++++++++--------- builtin/providers/aws/provider.go | 42 +++++++++++++++---------------- 2 files changed, 34 insertions(+), 33 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index 1a7a05c94..b0afbb65b 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -70,16 +70,17 @@ type Config struct { AllowedAccountIds []interface{} ForbiddenAccountIds []interface{} - DynamoDBEndpoint string - KinesisEndpoint string - Ec2Endpoint string - IamEndpoint string - ElbEndpoint string - S3Endpoint string - Insecure bool - SkipIamCredsValidation bool - SkipIamAccountId bool - SkipMetadataApiCheck bool + DynamoDBEndpoint string + KinesisEndpoint string + Ec2Endpoint string + IamEndpoint string + ElbEndpoint string + S3Endpoint string + Insecure bool + + SkipCredsValidation bool + SkipRequestingAccountId bool + SkipMetadataApiCheck bool } type AWSClient struct { @@ -203,7 +204,7 @@ func (c *Config) Client() (interface{}, error) { client.iamconn = iam.New(awsIamSess) client.stsconn = sts.New(sess) - if !c.SkipIamCredsValidation { + if !c.SkipCredsValidation { err = c.ValidateCredentials(client.stsconn) if err != nil { errs = append(errs, err) @@ -211,7 +212,7 @@ func (c *Config) Client() (interface{}, error) { } } - if !c.SkipIamAccountId { + if !c.SkipRequestingAccountId { accountId, err := GetAccountId(client.iamconn, client.stsconn, cp.ProviderName) if err == nil { client.accountid = accountId diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index af041e44e..fb1a53ba7 100644 --- a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -110,18 +110,18 @@ func Provider() terraform.ResourceProvider { Description: descriptions["insecure"], }, - "skip_iam_creds_validation": &schema.Schema{ + "skip_credentials_validation": &schema.Schema{ Type: 
schema.TypeBool, Optional: true, Default: false, - Description: descriptions["skip_iam_creds_validation"], + Description: descriptions["skip_credentials_validation"], }, - "skip_iam_account_id": &schema.Schema{ + "skip_requesting_account_id": &schema.Schema{ Type: schema.TypeBool, Optional: true, Default: false, - Description: descriptions["skip_iam_account_id"], + Description: descriptions["skip_requesting_account_id"], }, "skip_metadata_api_check": &schema.Schema{ @@ -355,11 +355,11 @@ func init() { "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + "default value is `false`", - "skip_iam_creds_validation": "Skip the IAM/STS credentials validation. " + - "Used for AWS API implementations that do not use IAM.", + "skip_credentials_validation": "Skip the credentials validation via STS API. " + + "Used for AWS API implementations that do not have STS available/implemented.", - "skip_iam_account_id": "Skip the request of account id to IAM/STS. " + - "Used for AWS API implementations that do not use IAM.", + "skip_requesting_account_id": "Skip requesting the account ID. " + + "Used for AWS API implementations that do not have IAM/STS API and/or metadata API.", "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + "Used for AWS API implementations that do not have a metadata api endpoint.", @@ -368,19 +368,19 @@ func init() { func providerConfigure(d *schema.ResourceData) (interface{}, error) { config := Config{ - AccessKey: d.Get("access_key").(string), - SecretKey: d.Get("secret_key").(string), - Profile: d.Get("profile").(string), - CredsFilename: d.Get("shared_credentials_file").(string), - Token: d.Get("token").(string), - Region: d.Get("region").(string), - MaxRetries: d.Get("max_retries").(int), - DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string), - KinesisEndpoint: d.Get("kinesis_endpoint").(string), - Insecure: d.Get("insecure").(bool), - SkipIamCredsValidation: d.Get("skip_iam_creds_validation").(bool), - SkipIamAccountId: d.Get("skip_iam_account_id").(bool), - SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), + AccessKey: d.Get("access_key").(string), + SecretKey: d.Get("secret_key").(string), + Profile: d.Get("profile").(string), + CredsFilename: d.Get("shared_credentials_file").(string), + Token: d.Get("token").(string), + Region: d.Get("region").(string), + MaxRetries: d.Get("max_retries").(int), + DynamoDBEndpoint: d.Get("dynamodb_endpoint").(string), + KinesisEndpoint: d.Get("kinesis_endpoint").(string), + Insecure: d.Get("insecure").(bool), + SkipCredsValidation: d.Get("skip_credentials_validation").(bool), + SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), + SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), } endpointsSet := d.Get("endpoints").(*schema.Set) From 3fc119923e0c6620ecda5c186032362a1ea760b9 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 10 Aug 2016 11:18:03 -0500 Subject: [PATCH 0646/1238] Revert "provider/aws: Added the ability to import aws_iam_role's" (#8112) --- .../providers/aws/import_aws_iam_role_test.go | 28 ------- .../resource_aws_iam_instance_profile_test.go | 8 +- ...resource_aws_iam_policy_attachment_test.go | 75 +++++++++++++++++-- .../providers/aws/resource_aws_iam_role.go | 9 --- ...rce_aws_iam_role_policy_attachment_test.go | 36 ++++++++- .../aws/resource_aws_iam_role_policy_test.go | 30 ++++---- .../aws/resource_aws_iam_role_test.go | 45 +++++++++-- 7 files changed, 155 insertions(+), 76 deletions(-) delete mode 100644 
builtin/providers/aws/import_aws_iam_role_test.go diff --git a/builtin/providers/aws/import_aws_iam_role_test.go b/builtin/providers/aws/import_aws_iam_role_test.go deleted file mode 100644 index f46cedd56..000000000 --- a/builtin/providers/aws/import_aws_iam_role_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/hashicorp/terraform/helper/resource" -) - -func TestAccAWSRole_importBasic(t *testing.T) { - resourceName := "aws_iam_role.role" - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckAWSRoleDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccAWSRoleConfig, - }, - - resource.TestStep{ - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} diff --git a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go b/builtin/providers/aws/resource_aws_iam_instance_profile_test.go index 049ccecae..93001184b 100644 --- a/builtin/providers/aws/resource_aws_iam_instance_profile_test.go +++ b/builtin/providers/aws/resource_aws_iam_instance_profile_test.go @@ -120,8 +120,8 @@ func testAccCheckAWSInstanceProfileExists(n string, res *iam.GetInstanceProfileO const testAccAwsIamInstanceProfileConfig = ` resource "aws_iam_role" "test" { - name = "test" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" + name = "test" + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" } resource "aws_iam_instance_profile" "test" { @@ -132,8 +132,8 @@ resource "aws_iam_instance_profile" "test" { const testAccAWSInstanceProfilePrefixNameConfig = ` resource "aws_iam_role" "test" { - name = "test" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" + name = "test" + assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"Service\":[\"ec2.amazonaws.com\"]},\"Action\":[\"sts:AssumeRole\"]}]}" } resource "aws_iam_instance_profile" "test" { diff --git a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go b/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go index 446f38ef6..11e50b0d9 100644 --- a/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go +++ b/builtin/providers/aws/resource_aws_iam_policy_attachment_test.go @@ -113,8 +113,22 @@ resource "aws_iam_user" "user" { name = "test-user" } resource "aws_iam_role" "role" { - name = "test-role" - assume_role_policy = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Sid\":\"\",\"Effect\":\"Allow\",\"Principal\":{\"Service\":\"ec2.amazonaws.com\"},\"Action\":\"sts:AssumeRole\"}]}" + name = "test-role" + assume_role_policy = < Date: Wed, 10 Aug 2016 17:29:07 +0100 Subject: [PATCH 0647/1238] aws: Let acc ID validation fail when we have no ID - we could've had ConflictsWith between affected fields, but that would make it fail even if skip_requesting_account_id=false and ConflictsWhen is not a thing (yet) --- builtin/providers/aws/config.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index b0afbb65b..711362a69 100644 --- 
a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -217,11 +217,11 @@ func (c *Config) Client() (interface{}, error) { if err == nil { client.accountid = accountId } + } - authErr := c.ValidateAccountId(client.accountid) - if authErr != nil { - errs = append(errs, authErr) - } + authErr := c.ValidateAccountId(client.accountid) + if authErr != nil { + errs = append(errs, authErr) } client.apigateway = apigateway.New(sess) From d1272808d88ede8e93c58a9cd239e64b65efae4e Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Wed, 10 Aug 2016 17:10:28 +0100 Subject: [PATCH 0648/1238] aws/docs: Document new skip_* fields --- .../docs/providers/aws/index.html.markdown | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown index de975ac51..90fe99584 100644 --- a/website/source/docs/providers/aws/index.html.markdown +++ b/website/source/docs/providers/aws/index.html.markdown @@ -159,6 +159,30 @@ The following arguments are supported in the `provider` block: URL constructed from the `region`. It's typically used to connect to kinesalite. +* `skip_credentials_validation` - (Optional) Skip the credentials validation via STS API. + Useful for AWS API implementations that do not have STS available/implemented. + +* `skip_requesting_account_id` - (Optional) Skip requesting the account ID. + Useful for AWS API implementations that do not have IAM/STS API and/or metadata API. + `true` (enabling this option) prevents you from managing any resource that requires Account ID to construct an ARN, e.g. + - `aws_db_instance` + - `aws_db_option_group` + - `aws_db_parameter_group` + - `aws_db_security_group` + - `aws_db_subnet_group` + - `aws_elasticache_cluster` + - `aws_glacier_vault` + - `aws_rds_cluster` + - `aws_rds_cluster_instance` + - `aws_rds_cluster_parameter_group` + - `aws_redshift_cluster` + +* `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API check. + Useful for AWS API implementations that do not have a metadata API endpoint. + `true` prevents Terraform from authenticating via Metadata API - i.e. 
you may need to use other auth methods + (static credentials set as ENV vars or config) + + Nested `endpoints` block supports the followings: * `iam` - (Optional) Use this to override the default endpoint From ee6159cd9dbeb6c13a8595ca7135dd09b9cfb5b8 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 10 Aug 2016 13:33:42 -0400 Subject: [PATCH 0649/1238] update github.com/hashicorp/go-retryablehttp --- .../hashicorp/go-retryablehttp/.gitignore | 3 - .../hashicorp/go-retryablehttp/.travis.yml | 12 --- .../hashicorp/go-retryablehttp/client.go | 93 ++++++++++++++++--- vendor/vendor.json | 4 +- 4 files changed, 84 insertions(+), 28 deletions(-) delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-retryablehttp/.travis.yml diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore b/vendor/github.com/hashicorp/go-retryablehttp/.gitignore deleted file mode 100644 index caab963a3..000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -.idea/ -*.iml -*.test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml b/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml deleted file mode 100644 index 49c8bb75d..000000000 --- a/vendor/github.com/hashicorp/go-retryablehttp/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.5.1 - -branches: - only: - - master - -script: make updatedeps test diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index db9eada8a..d0ec6b2ab 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -38,6 +38,10 @@ var ( // defaultClient is used for performing requests without explicitly making // a new client. It is purposely private to avoid modifications. defaultClient = NewClient() + + // We need to consume response bodies to maintain http connections, but + // limit the size we consume to respReadLimit. + respReadLimit = int64(4096) ) // LenReader is an interface implemented by many in-memory io.Reader's. Used @@ -86,6 +90,23 @@ func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { // consumers. type RequestLogHook func(*log.Logger, *http.Request, int) +// ResponseLogHook is like RequestLogHook, but allows running a function +// on each HTTP response. This function will be invoked at the end of +// every HTTP request executed, regardless of whether a subsequent retry +// needs to be performed or not. If the response body is read or closed +// from this method, this will affect the response returned from Do(). +type ResponseLogHook func(*log.Logger, *http.Response) + +// CheckRetry specifies a policy for handling retries. It is called +// following each request with the response and error values returned by +// the http.Client. If CheckRetry returns false, the Client stops retrying +// and returns the response to the caller. If CheckRetry returns an error, +// that error value is returned in lieu of the error from the request. The +// Client will close any response body when retrying, but if the retry is +// aborted it is up to the CheckResponse callback to properly close any +// response body before returning. +type CheckRetry func(resp *http.Response, err error) (bool, error) + // Client is used to make HTTP requests. It adds additional functionality // like automatic retries to tolerate minor outages. 
type Client struct { @@ -99,6 +120,14 @@ type Client struct { // RequestLogHook allows a user-supplied function to be called // before each retry. RequestLogHook RequestLogHook + + // ResponseLogHook allows a user-supplied function to be called + // with the response from each HTTP request executed. + ResponseLogHook ResponseLogHook + + // CheckRetry specifies the policy for handling retries, and is called + // after each request. The default policy is DefaultRetryPolicy. + CheckRetry CheckRetry } // NewClient creates a new Client with default settings. @@ -109,9 +138,27 @@ func NewClient() *Client { RetryWaitMin: defaultRetryWaitMin, RetryWaitMax: defaultRetryWaitMax, RetryMax: defaultRetryMax, + CheckRetry: DefaultRetryPolicy, } } +// DefaultRetryPolicy provides a default callback for Client.CheckRetry, which +// will retry on connection errors and server errors. +func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { + if err != nil { + return true, err + } + // Check the response code. We retry on 500-range responses to allow + // the server time to recover, as 500's are typically not permanent + // errors and may relate to outages on the server side. This will catch + // invalid response codes as well, like 0 and 999. + if resp.StatusCode == 0 || resp.StatusCode >= 500 { + return true, nil + } + + return false, nil +} + // Do wraps calling an HTTP method with retries. func (c *Client) Do(req *Request) (*http.Response, error) { c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) @@ -132,23 +179,36 @@ func (c *Client) Do(req *Request) (*http.Response, error) { // Attempt the request resp, err := c.HTTPClient.Do(req.Request) + + // Check if we should continue with retries. + checkOK, checkErr := c.CheckRetry(resp, err) + if err != nil { c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) - goto RETRY + } else { + // Call this here to maintain the behavior of logging all requests, + // even if CheckRetry signals to stop. + if c.ResponseLogHook != nil { + // Call the response logger function if provided. + c.ResponseLogHook(c.Logger, resp) + } } - code = resp.StatusCode - // Check the response code. We retry on 500-range responses to allow - // the server time to recover, as 500's are typically not permanent - // errors and may relate to outages on the server side. - if code%500 < 100 { - resp.Body.Close() - goto RETRY + // Now decide if we should continue. + if !checkOK { + if checkErr != nil { + err = checkErr + } + return resp, err } - return resp, nil - RETRY: - if i == c.RetryMax { + // We're going to retry, consume any response to reuse the connection. + if err == nil { + c.drainBody(resp.Body) + } + + remain := c.RetryMax - i + if remain == 0 { break } wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i) @@ -156,7 +216,7 @@ func (c *Client) Do(req *Request) (*http.Response, error) { if code > 0 { desc = fmt.Sprintf("%s (status: %d)", desc, code) } - c.Logger.Printf("[DEBUG] %s: retrying in %s", desc, wait) + c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) time.Sleep(wait) } @@ -165,6 +225,15 @@ func (c *Client) Do(req *Request) (*http.Response, error) { req.Method, req.URL, c.RetryMax+1) } +// Try to read the response body so we can reuse this connection. 
+func (c *Client) drainBody(body io.ReadCloser) { + defer body.Close() + _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) + if err != nil { + c.Logger.Printf("[ERR] error reading response body: %v", err) + } +} + // Get is a shortcut for doing a GET request without making a new client. func Get(url string) (*http.Response, error) { return defaultClient.Get(url) diff --git a/vendor/vendor.json b/vendor/vendor.json index ee31a419f..f25283096 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1108,8 +1108,10 @@ "revision": "cccb4a1328abbb89898f3ecf4311a05bddc4de6d" }, { + "checksumSHA1": "GBDE1KDl/7c5hlRPYRZ7+C0WQ0g=", "path": "github.com/hashicorp/go-retryablehttp", - "revision": "5ec125ef739293cb4d57c3456dd92ba9af29ed6e" + "revision": "f4ed9b0fa01a2ac614afe7c897ed2e3d8208f3e8", + "revisionTime": "2016-08-10T17:22:55Z" }, { "path": "github.com/hashicorp/go-rootcerts", From df0c795b39c3c264862d20545809a118e4b94321 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Mon, 8 Aug 2016 13:33:45 -0400 Subject: [PATCH 0650/1238] Don't retry the atlas requests with the wrong cert This probably won't recover, so abort immediately. Requires retryablehttp CheckRetry patch. --- state/remote/atlas.go | 18 ++++++++++++++ state/remote/atlas_test.go | 50 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) diff --git a/state/remote/atlas.go b/state/remote/atlas.go index 5343c0236..ead0acbcb 100644 --- a/state/remote/atlas.go +++ b/state/remote/atlas.go @@ -4,6 +4,7 @@ import ( "bytes" "crypto/md5" "crypto/tls" + "crypto/x509" "encoding/base64" "fmt" "io" @@ -276,9 +277,26 @@ func (c *AtlasClient) http() (*retryablehttp.Client, error) { return nil, err } rc := retryablehttp.NewClient() + + rc.CheckRetry = func(resp *http.Response, err error) (bool, error) { + if err != nil { + // don't bother retrying if the certs don't match + if err, ok := err.(*url.Error); ok { + if _, ok := err.Err.(x509.UnknownAuthorityError); ok { + return false, nil + } + } + // continue retrying + return true, nil + } + return retryablehttp.DefaultRetryPolicy(resp, err) + } + t := cleanhttp.DefaultTransport() t.TLSClientConfig = tlsConfig rc.HTTPClient.Transport = t + + c.HTTPClient = rc return rc, nil } diff --git a/state/remote/atlas_test.go b/state/remote/atlas_test.go index 9d4f226fe..060d79455 100644 --- a/state/remote/atlas_test.go +++ b/state/remote/atlas_test.go @@ -3,8 +3,11 @@ package remote import ( "bytes" "crypto/md5" + "crypto/tls" + "crypto/x509" "net/http" "net/http/httptest" + "net/url" "os" "testing" "time" @@ -36,6 +39,53 @@ func TestAtlasClient(t *testing.T) { testClient(t, client) } +func TestAtlasClient_noRetryOnBadCerts(t *testing.T) { + acctest.RemoteTestPrecheck(t) + + client, err := atlasFactory(map[string]string{ + "access_token": "NOT_REQUIRED", + "name": "hashicorp/test-remote-state", + }) + if err != nil { + t.Fatalf("bad: %s", err) + } + + ac := client.(*AtlasClient) + // trigger the AtlasClient to build the http client and assign HTTPClient + httpClient, err := ac.http() + if err != nil { + t.Fatal(err) + } + + // remove the CA certs from the client + brokenCfg := &tls.Config{ + RootCAs: new(x509.CertPool), + } + httpClient.HTTPClient.Transport.(*http.Transport).TLSClientConfig = brokenCfg + + // Instrument CheckRetry to make sure we didn't retry + retries := 0 + oldCheck := httpClient.CheckRetry + httpClient.CheckRetry = func(resp *http.Response, err error) (bool, error) { + if retries > 0 { + t.Fatal("retried after certificate error") + } + retries++ + 
return oldCheck(resp, err) + } + + _, err = client.Get() + if err != nil { + if err, ok := err.(*url.Error); ok { + if _, ok := err.Err.(x509.UnknownAuthorityError); ok { + return + } + } + } + + t.Fatalf("expected x509.UnknownAuthorityError, got %v", err) +} + func TestAtlasClient_ReportedConflictEqualStates(t *testing.T) { fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange) srv := fakeAtlas.Server() From d60cf4d77792a8cbd66722d70bcdc01cf0f0317b Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Wed, 10 Aug 2016 11:37:17 -0700 Subject: [PATCH 0651/1238] update acceptance tests for packet volume, update packngo api client --- .../packet/resource_packet_project_test.go | 2 +- .../packet/resource_packet_volume_test.go | 51 +++++++++++-------- .../github.com/packethost/packngo/volumes.go | 2 +- 3 files changed, 31 insertions(+), 24 deletions(-) diff --git a/builtin/providers/packet/resource_packet_project_test.go b/builtin/providers/packet/resource_packet_project_test.go index ff1b45f7c..1ba91b1fa 100644 --- a/builtin/providers/packet/resource_packet_project_test.go +++ b/builtin/providers/packet/resource_packet_project_test.go @@ -38,7 +38,7 @@ func testAccCheckPacketProjectDestroy(s *terraform.State) error { continue } if _, _, err := client.Projects.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("Project cstill exists") + return fmt.Errorf("Project still exists") } } diff --git a/builtin/providers/packet/resource_packet_volume_test.go b/builtin/providers/packet/resource_packet_volume_test.go index b2db4e3f0..1cf316a72 100644 --- a/builtin/providers/packet/resource_packet_volume_test.go +++ b/builtin/providers/packet/resource_packet_volume_test.go @@ -2,6 +2,7 @@ package packet import ( "fmt" + "os" "testing" "github.com/hashicorp/terraform/helper/resource" @@ -12,24 +13,26 @@ import ( func TestAccPacketVolume_Basic(t *testing.T) { var volume packngo.Volume + project_id := os.Getenv("PACKET_PROJECT_ID") + facility := os.Getenv("PACKET_FACILITY") + resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, + PreCheck: testAccPacketVolumePreCheck(t), Providers: testAccProviders, CheckDestroy: testAccCheckPacketVolumeDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: testAccCheckPacketVolumeConfig_basic, + Config: fmt.Sprintf(testAccCheckPacketVolumeConfig_basic, project_id, facility), Check: resource.ComposeTestCheckFunc( testAccCheckPacketVolumeExists("packet_volume.foobar", &volume), - testAccCheckPacketVolumeAttributes(&volume), resource.TestCheckResourceAttr( - "packet_volume.foobar", "project_id", "foobar"), + "packet_volume.foobar", "project_id", project_id), resource.TestCheckResourceAttr( - "packet_volume.foobar", "plan", "foobar"), - resource.TestCheckResourceAttr( - "packet_volume.foobar", "facility", "foobar"), + "packet_volume.foobar", "plan", "storage_1"), resource.TestCheckResourceAttr( "packet_volume.foobar", "billing_cycle", "hourly"), + resource.TestCheckResourceAttr( + "packet_volume.foobar", "size", "100"), ), }, }, @@ -44,22 +47,13 @@ func testAccCheckPacketVolumeDestroy(s *terraform.State) error { continue } if _, _, err := client.Volumes.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("Volume cstill exists") + return fmt.Errorf("Volume still exists") } } return nil } -func testAccCheckPacketVolumeAttributes(volume *packngo.Volume) resource.TestCheckFunc { - return func(s *terraform.State) error { - if volume.Name != "foobar" { - return fmt.Errorf("Bad name: %s", volume.Name) - } - return nil - } -} - func 
testAccCheckPacketVolumeExists(n string, volume *packngo.Volume) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -86,10 +80,23 @@ func testAccCheckPacketVolumeExists(n string, volume *packngo.Volume) resource.T } } -var testAccCheckPacketVolumeConfig_basic = fmt.Sprintf(` +func testAccPacketVolumePreCheck(t *testing.T) func() { + return func() { + testAccPreCheck(t) + if os.Getenv("PACKET_PROJECT_ID") == "" { + t.Fatal("PACKET_PROJECT_ID must be set") + } + if os.Getenv("PACKET_FACILITY") == "" { + t.Fatal("PACKET_FACILITY must be set") + } + } +} + +const testAccCheckPacketVolumeConfig_basic = ` resource "packet_volume" "foobar" { - project_id = "foobar" - plan = "foobar" - facility = "foobar" + plan = "storage_1" billing_cycle = "hourly" -}`) + size = 100 + project_id = "%s" + facility = "%s" +}` diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go index 6a7037826..c3c3e561b 100644 --- a/vendor/github.com/packethost/packngo/volumes.go +++ b/vendor/github.com/packethost/packngo/volumes.go @@ -82,7 +82,7 @@ type VolumeServiceOp struct { // Get returns a volume by id func (v *VolumeServiceOp) Get(volumeID string) (*Volume, *Response, error) { - path := fmt.Sprintf("%s/%s", volumeBasePath, volumeID) + path := fmt.Sprintf("%s/%s?include=facility", volumeBasePath, volumeID) req, err := v.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err From 3430afb8a54ea800cc26da3cb7f901329b516450 Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Wed, 10 Aug 2016 11:51:09 -0700 Subject: [PATCH 0652/1238] remove IP stuff for now --- .../packet/resource_packet_ip_address.go | 99 --------------- .../packet/resource_packet_ip_address_test.go | 86 ------------- .../packet/resource_packet_ip_reservation.go | 116 ------------------ .../resource_packet_ip_reservation_test.go | 86 ------------- 4 files changed, 387 deletions(-) delete mode 100644 builtin/providers/packet/resource_packet_ip_address.go delete mode 100644 builtin/providers/packet/resource_packet_ip_address_test.go delete mode 100644 builtin/providers/packet/resource_packet_ip_reservation.go delete mode 100644 builtin/providers/packet/resource_packet_ip_reservation_test.go diff --git a/builtin/providers/packet/resource_packet_ip_address.go b/builtin/providers/packet/resource_packet_ip_address.go deleted file mode 100644 index 3bffdf443..000000000 --- a/builtin/providers/packet/resource_packet_ip_address.go +++ /dev/null @@ -1,99 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketIPAddress() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketIPAddressCreate, - Read: resourcePacketIPAddressRead, - Delete: resourcePacketIPAddressDelete, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "address": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePacketIPAddressCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.IPAddressAssignRequest{ - Address: d.Get("address").(string), - } - - device_id := "" - if attr, ok := d.GetOk("instance_id"); ok { - device_id = 
attr.(string) - } - - newIPAddress, _, err := client.Ips.Assign(device_id, createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(newIPAddress.ID) - - return resourcePacketIPAddressRead(d, meta) -} - -func resourcePacketIPAddressRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - ip_address, _, err := client.Ips.Get(d.Id()) - if err != nil { - err = friendlyError(err) - - // If the ip_address somehow already destroyed, mark as succesfully gone. - if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - d.Set("address", ip_address.Address) - d.Set("gateway", ip_address.Gateway) - d.Set("network", ip_address.Network) - d.Set("family", ip_address.AddressFamily) - d.Set("netmask", ip_address.Netmask) - d.Set("public", ip_address.Public) - d.Set("cidr", ip_address.Cidr) - d.Set("assigned_to", ip_address.AssignedTo) - d.Set("created", ip_address.Created) - d.Set("updated", ip_address.Updated) - - return nil -} - -func resourcePacketIPAddressDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - if _, err := client.Ips.Unassign(d.Id()); err != nil { - return friendlyError(err) - } - - return nil -} diff --git a/builtin/providers/packet/resource_packet_ip_address_test.go b/builtin/providers/packet/resource_packet_ip_address_test.go deleted file mode 100644 index 9944eb107..000000000 --- a/builtin/providers/packet/resource_packet_ip_address_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package packet - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/packethost/packngo" -) - -func TestAccPacketIPAddress_Basic(t *testing.T) { - var ip_address packngo.IPAddress - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPacketIPAddressDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckPacketIPAddressConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckPacketIPAddressExists("packet_ip_address.foobar", &ip_address), - testAccCheckPacketIPAddressAttributes(&ip_address), - resource.TestCheckResourceAttr( - "packet_ip_address.foobar", "address", "foobar"), - ), - }, - }, - }) -} - -func testAccCheckPacketIPAddressDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*packngo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "packet_ip_address" { - continue - } - if _, _, err := client.Ips.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("IPAddress cstill exists") - } - } - - return nil -} - -func testAccCheckPacketIPAddressAttributes(ip_address *packngo.IPAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - if ip_address.Address != "foobar" { - return fmt.Errorf("Bad address: %s", ip_address.Address) - } - return nil - } -} - -func testAccCheckPacketIPAddressExists(n string, ip_address *packngo.IPAddress) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*packngo.Client) - - foundIPAddress, _, err := client.Ips.Get(rs.Primary.ID) - if err != nil { - return err - } - if foundIPAddress.ID != rs.Primary.ID { - return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundIPAddress) 
- } - - *ip_address = *foundIPAddress - - return nil - } -} - -var testAccCheckPacketIPAddressConfig_basic = fmt.Sprintf(` -resource "packet_ip_address" "foobar" { - address = "foobar" -}`) diff --git a/builtin/providers/packet/resource_packet_ip_reservation.go b/builtin/providers/packet/resource_packet_ip_reservation.go deleted file mode 100644 index 286df050b..000000000 --- a/builtin/providers/packet/resource_packet_ip_reservation.go +++ /dev/null @@ -1,116 +0,0 @@ -package packet - -import ( - "github.com/hashicorp/terraform/helper/schema" - "github.com/packethost/packngo" -) - -func resourcePacketIPReservation() *schema.Resource { - return &schema.Resource{ - Create: resourcePacketIPReservationCreate, - Read: resourcePacketIPReservationRead, - Delete: resourcePacketIPReservationDelete, - - Schema: map[string]*schema.Schema{ - "id": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "type": &schema.Schema{ - Type: schema.TypeString, - Required: true, - ForceNew: true, - }, - - "quantity": &schema.Schema{ - Type: schema.TypeInt, - Optional: true, - ForceNew: true, - }, - - "comments": &schema.Schema{ - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, - - "created": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - - "updated": &schema.Schema{ - Type: schema.TypeString, - Computed: true, - }, - }, - } -} - -func resourcePacketIPReservationCreate(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - createRequest := &packngo.IPReservationRequest{ - Type: d.Get("type").(string), - Quantity: d.Get("quantity").(int), - Comments: d.Get("comments").(string), - } - - project_id := "" - if attr, ok := d.GetOk("project_id"); ok { - project_id = attr.(string) - } - - newIPReservation, _, err := client.IpReservations.RequestMore(project_id, createRequest) - if err != nil { - return friendlyError(err) - } - - d.SetId(newIPReservation.ID) - - return resourcePacketIPReservationRead(d, meta) -} - -func resourcePacketIPReservationRead(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - ip_reservation, _, err := client.IpReservations.Get(d.Id()) - if err != nil { - err = friendlyError(err) - - // If the ip_reservation somehow already destroyed, mark as succesfully gone. 
- if isNotFound(err) { - d.SetId("") - return nil - } - - return err - } - - d.Set("address", ip_reservation.Address) - d.Set("network", ip_reservation.Network) - d.Set("family", ip_reservation.AddressFamily) - d.Set("netmask", ip_reservation.Netmask) - d.Set("public", ip_reservation.Public) - d.Set("cidr", ip_reservation.Cidr) - d.Set("management", ip_reservation.Management) - d.Set("manageable", ip_reservation.Manageable) - d.Set("addon", ip_reservation.Addon) - d.Set("bill", ip_reservation.Bill) - d.Set("assignments", ip_reservation.Assignments) - d.Set("created", ip_reservation.Created) - d.Set("updated", ip_reservation.Updated) - - return nil -} - -func resourcePacketIPReservationDelete(d *schema.ResourceData, meta interface{}) error { - client := meta.(*packngo.Client) - - if _, err := client.IpReservations.Remove(d.Id()); err != nil { - return friendlyError(err) - } - - return nil -} diff --git a/builtin/providers/packet/resource_packet_ip_reservation_test.go b/builtin/providers/packet/resource_packet_ip_reservation_test.go deleted file mode 100644 index 6c2c962fa..000000000 --- a/builtin/providers/packet/resource_packet_ip_reservation_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package packet - -import ( - "fmt" - "testing" - - "github.com/hashicorp/terraform/helper/resource" - "github.com/hashicorp/terraform/terraform" - "github.com/packethost/packngo" -) - -func TestAccPacketIPReservation_Basic(t *testing.T) { - var ip_reservation packngo.IPReservation - - resource.Test(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - Providers: testAccProviders, - CheckDestroy: testAccCheckPacketIPReservationDestroy, - Steps: []resource.TestStep{ - resource.TestStep{ - Config: testAccCheckPacketIPReservationConfig_basic, - Check: resource.ComposeTestCheckFunc( - testAccCheckPacketIPReservationExists("packet_ip_reservation.foobar", &ip_reservation), - testAccCheckPacketIPReservationAttributes(&ip_reservation), - resource.TestCheckResourceAttr( - "packet_ip_reservation.foobar", "type", "foobar"), - ), - }, - }, - }) -} - -func testAccCheckPacketIPReservationDestroy(s *terraform.State) error { - client := testAccProvider.Meta().(*packngo.Client) - - for _, rs := range s.RootModule().Resources { - if rs.Type != "packet_ip_reservation" { - continue - } - if _, _, err := client.IpReservations.Get(rs.Primary.ID); err == nil { - return fmt.Errorf("IPReservation cstill exists") - } - } - - return nil -} - -func testAccCheckPacketIPReservationAttributes(ip_reservation *packngo.IPReservation) resource.TestCheckFunc { - return func(s *terraform.State) error { - if ip_reservation.Address != "foobar" { - return fmt.Errorf("Bad address: %s", ip_reservation.Address) - } - return nil - } -} - -func testAccCheckPacketIPReservationExists(n string, ip_reservation *packngo.IPReservation) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - if rs.Primary.ID == "" { - return fmt.Errorf("No Record ID is set") - } - - client := testAccProvider.Meta().(*packngo.Client) - - foundIPReservation, _, err := client.IpReservations.Get(rs.Primary.ID) - if err != nil { - return err - } - if foundIPReservation.ID != rs.Primary.ID { - return fmt.Errorf("Record not found: %v - %v", rs.Primary.ID, foundIPReservation) - } - - *ip_reservation = *foundIPReservation - - return nil - } -} - -var testAccCheckPacketIPReservationConfig_basic = fmt.Sprintf(` -resource "packet_ip_reservation" "foobar" { - type = "foobar" 
-}`) From 58103a0df9bbeadc5415a5d78527f7e21b0ba60e Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Wed, 10 Aug 2016 11:55:49 -0700 Subject: [PATCH 0653/1238] remove ip stuff for now --- builtin/providers/packet/provider.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/builtin/providers/packet/provider.go b/builtin/providers/packet/provider.go index 853e1c247..58515210c 100644 --- a/builtin/providers/packet/provider.go +++ b/builtin/providers/packet/provider.go @@ -22,8 +22,6 @@ func Provider() terraform.ResourceProvider { "packet_ssh_key": resourcePacketSSHKey(), "packet_project": resourcePacketProject(), "packet_volume": resourcePacketVolume(), - "packet_ip_address": resourcePacketIPAddress(), - "packet_ip_reservation": resourcePacketIPReservation(), }, ConfigureFunc: providerConfigure, From c5b15da76dd33f32a93de8ed84ae08e6090e6803 Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Wed, 10 Aug 2016 12:11:04 -0700 Subject: [PATCH 0654/1238] gofmt! --- builtin/providers/packet/provider.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/builtin/providers/packet/provider.go b/builtin/providers/packet/provider.go index 58515210c..f3a848337 100644 --- a/builtin/providers/packet/provider.go +++ b/builtin/providers/packet/provider.go @@ -18,10 +18,10 @@ func Provider() terraform.ResourceProvider { }, ResourcesMap: map[string]*schema.Resource{ - "packet_device": resourcePacketDevice(), - "packet_ssh_key": resourcePacketSSHKey(), - "packet_project": resourcePacketProject(), - "packet_volume": resourcePacketVolume(), + "packet_device": resourcePacketDevice(), + "packet_ssh_key": resourcePacketSSHKey(), + "packet_project": resourcePacketProject(), + "packet_volume": resourcePacketVolume(), }, ConfigureFunc: providerConfigure, From 8bba3d4e6e1159f9600a5fdae657777226de35f1 Mon Sep 17 00:00:00 2001 From: ldanz Date: Wed, 10 Aug 2016 12:32:18 -0700 Subject: [PATCH 0655/1238] Documentation: explain the role of to_port in a security group rule when protocol is "icmp" (#8093) --- .../docs/providers/aws/r/security_group.html.markdown | 6 +++--- .../docs/providers/aws/r/security_group_rule.html.markdown | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown index 15e5a9ed8..aa8fe2f2b 100644 --- a/website/source/docs/providers/aws/r/security_group.html.markdown +++ b/website/source/docs/providers/aws/r/security_group.html.markdown @@ -92,7 +92,7 @@ The `ingress` block supports: EC2-Classic, or Group IDs if using a VPC. * `self` - (Optional) If true, the security group itself will be added as a source to this ingress rule. -* `to_port` - (Required) The end range port. +* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp"). The `egress` block supports: @@ -105,7 +105,7 @@ The `egress` block supports: EC2-Classic, or Group IDs if using a VPC. * `self` - (Optional) If true, the security group itself will be added as a source to this egress rule. -* `to_port` - (Required) The end range port. +* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp"). ~> **NOTE on Egress rules:** By default, AWS creates an `ALLOW ALL` egress rule when creating a new Security Group inside of a VPC. When creating a new Security @@ -160,4 +160,4 @@ Security Groups can be imported using the `security group id`, e.g. 
``` $ terraform import aws_security_group.elb_sg sg-903004f8 -``` \ No newline at end of file +``` diff --git a/website/source/docs/providers/aws/r/security_group_rule.html.markdown b/website/source/docs/providers/aws/r/security_group_rule.html.markdown index 7ae1aba51..f10aedd1a 100644 --- a/website/source/docs/providers/aws/r/security_group_rule.html.markdown +++ b/website/source/docs/providers/aws/r/security_group_rule.html.markdown @@ -51,7 +51,7 @@ Only valid with `egress`. depending on the `type`. Cannot be specified with `cidr_blocks`. * `self` - (Optional) If true, the security group itself will be added as a source to this ingress rule. -* `to_port` - (Required) The end range port. +* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp"). ## Usage with prefix list IDs From f55532b018bb1b7d28f086e52672b053cbf3e025 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 16:14:43 -0400 Subject: [PATCH 0656/1238] provider/google: Support Import of 'google_compute_autoscaler' --- .../google/import_compute_autoscaler_test.go | 28 +++++++ builtin/providers/google/provider.go | 27 +++++++ .../google/resource_compute_autoscaler.go | 74 +++++++++++++++---- .../resource_compute_autoscaler_test.go | 4 +- 4 files changed, 118 insertions(+), 15 deletions(-) create mode 100644 builtin/providers/google/import_compute_autoscaler_test.go diff --git a/builtin/providers/google/import_compute_autoscaler_test.go b/builtin/providers/google/import_compute_autoscaler_test.go new file mode 100644 index 000000000..4d5792c69 --- /dev/null +++ b/builtin/providers/google/import_compute_autoscaler_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAutoscaler_importBasic(t *testing.T) { + resourceName := "google_compute_autoscaler.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAutoscalerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAutoscaler_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 89e176979..40b2ebe4f 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -3,10 +3,13 @@ package google import ( "encoding/json" "fmt" + "strings" "github.com/hashicorp/terraform/helper/pathorcontents" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" ) // Provider returns a terraform.ResourceProvider. 
@@ -195,3 +198,27 @@ func getProject(d *schema.ResourceData, config *Config) (string, error) { } return res.(string), nil } + +func getZonalResourceFromRegion(getResource func(string) (interface{}, error), region string, compute *compute.Service, project string) (interface{}, error) { + zoneList, err := compute.Zones.List(project).Do() + if err != nil { + return nil, err + } + var resource interface{} + for _, zone := range zoneList.Items { + if strings.Contains(zone.Name, region) { + resource, err = getResource(zone.Name) + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // Resource was not found in this zone + continue + } + return nil, fmt.Errorf("Error reading Resource: %s", err) + } + // Resource was found + return resource, nil + } + } + // Resource does not exist in this region + return nil, nil +} diff --git a/builtin/providers/google/resource_compute_autoscaler.go b/builtin/providers/google/resource_compute_autoscaler.go index 0afb83e38..bbecbe977 100644 --- a/builtin/providers/google/resource_compute_autoscaler.go +++ b/builtin/providers/google/resource_compute_autoscaler.go @@ -3,10 +3,10 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/schema" "google.golang.org/api/compute/v1" - "google.golang.org/api/googleapi" ) func resourceComputeAutoscaler() *schema.Resource { @@ -15,6 +15,9 @@ func resourceComputeAutoscaler() *schema.Resource { Read: resourceComputeAutoscalerRead, Update: resourceComputeAutoscalerUpdate, Delete: resourceComputeAutoscalerDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -241,6 +244,40 @@ func resourceComputeAutoscalerCreate(d *schema.ResourceData, meta interface{}) e return resourceComputeAutoscalerRead(d, meta) } +func flattenAutoscalingPolicy(policy *compute.AutoscalingPolicy) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + policyMap := make(map[string]interface{}) + policyMap["max_replicas"] = policy.MaxNumReplicas + policyMap["min_replicas"] = policy.MinNumReplicas + policyMap["cooldown_period"] = policy.CoolDownPeriodSec + if policy.CpuUtilization != nil { + cpuUtils := make([]map[string]interface{}, 0, 1) + cpuUtil := make(map[string]interface{}) + cpuUtil["target"] = policy.CpuUtilization.UtilizationTarget + cpuUtils = append(cpuUtils, cpuUtil) + policyMap["cpu_utilization"] = cpuUtils + } + if policy.LoadBalancingUtilization != nil { + loadBalancingUtils := make([]map[string]interface{}, 0, 1) + loadBalancingUtil := make(map[string]interface{}) + loadBalancingUtil["target"] = policy.LoadBalancingUtilization.UtilizationTarget + loadBalancingUtils = append(loadBalancingUtils, loadBalancingUtil) + policyMap["load_balancing_utilization"] = loadBalancingUtils + } + if policy.CustomMetricUtilizations != nil { + metricUtils := make([]map[string]interface{}, 0, len(policy.CustomMetricUtilizations)) + for _, customMetricUtilization := range policy.CustomMetricUtilizations { + metricUtil := make(map[string]interface{}) + metricUtil["target"] = customMetricUtilization.UtilizationTarget + + metricUtils = append(metricUtils, metricUtil) + } + policyMap["metric"] = metricUtils + } + result = append(result, policyMap) + return result +} + func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -249,22 +286,33 @@ func resourceComputeAutoscalerRead(d *schema.ResourceData, meta interface{}) err return 
err } - zone := d.Get("zone").(string) - scaler, err := config.clientCompute.Autoscalers.Get( - project, zone, d.Id()).Do() + region, err := getRegion(d, config) if err != nil { - if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { - // The resource doesn't exist anymore - log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) - d.SetId("") - - return nil - } - - return fmt.Errorf("Error reading Autoscaler: %s", err) + return err + } + var getAutoscaler = func(zone string) (interface{}, error) { + return config.clientCompute.Autoscalers.Get(project, zone, d.Id()).Do() } + resource, err := getZonalResourceFromRegion(getAutoscaler, region, config.clientCompute, project) + if err != nil { + return err + } + if resource == nil { + log.Printf("[WARN] Removing Autoscalar %q because it's gone", d.Get("name").(string)) + d.SetId("") + return nil + } + scaler := resource.(*compute.Autoscaler) + zoneUrl := strings.Split(scaler.Zone, "/") d.Set("self_link", scaler.SelfLink) + d.Set("name", scaler.Name) + d.Set("target", scaler.Target) + d.Set("zone", zoneUrl[len(zoneUrl)-1]) + d.Set("description", scaler.Description) + if scaler.AutoscalingPolicy != nil { + d.Set("autoscaling_policy", flattenAutoscalingPolicy(scaler.AutoscalingPolicy)) + } return nil } diff --git a/builtin/providers/google/resource_compute_autoscaler_test.go b/builtin/providers/google/resource_compute_autoscaler_test.go index c946bb774..00a92592f 100644 --- a/builtin/providers/google/resource_compute_autoscaler_test.go +++ b/builtin/providers/google/resource_compute_autoscaler_test.go @@ -179,7 +179,7 @@ resource "google_compute_autoscaler" "foobar" { target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 5 - min_replicas = 0 + min_replicas = 1 cooldown_period = 60 cpu_utilization = { target = 0.5 @@ -236,7 +236,7 @@ resource "google_compute_autoscaler" "foobar" { target = "${google_compute_instance_group_manager.foobar.self_link}" autoscaling_policy = { max_replicas = 10 - min_replicas = 0 + min_replicas = 1 cooldown_period = 60 cpu_utilization = { target = 0.5 From 3fb62870272c31dbf7f622ae04512ed725afccfb Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Thu, 4 Aug 2016 13:53:45 -0400 Subject: [PATCH 0657/1238] provider/google: Support Import of 'google_resource_http_health_check' --- .../import_compute_http_health_check_test.go | 28 +++++++++++++++++++ .../resource_compute_http_health_check.go | 9 +++++- 2 files changed, 36 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/google/import_compute_http_health_check_test.go diff --git a/builtin/providers/google/import_compute_http_health_check_test.go b/builtin/providers/google/import_compute_http_health_check_test.go new file mode 100644 index 000000000..027509885 --- /dev/null +++ b/builtin/providers/google/import_compute_http_health_check_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeHttpHealthCheck_importBasic(t *testing.T) { + resourceName := "google_compute_http_health_check.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeHttpHealthCheckDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeHttpHealthCheck_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff 
--git a/builtin/providers/google/resource_compute_http_health_check.go b/builtin/providers/google/resource_compute_http_health_check.go index 70c0146bb..0802db8bc 100644 --- a/builtin/providers/google/resource_compute_http_health_check.go +++ b/builtin/providers/google/resource_compute_http_health_check.go @@ -15,6 +15,9 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Read: resourceComputeHttpHealthCheckRead, Delete: resourceComputeHttpHealthCheckDelete, Update: resourceComputeHttpHealthCheckUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -55,6 +58,7 @@ func resourceComputeHttpHealthCheck() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "request_path": &schema.Schema{ @@ -220,11 +224,14 @@ func resourceComputeHttpHealthCheckRead(d *schema.ResourceData, meta interface{} d.Set("host", hchk.Host) d.Set("request_path", hchk.RequestPath) d.Set("check_interval_sec", hchk.CheckIntervalSec) - d.Set("health_threshold", hchk.HealthyThreshold) + d.Set("healthy_threshold", hchk.HealthyThreshold) d.Set("port", hchk.Port) d.Set("timeout_sec", hchk.TimeoutSec) d.Set("unhealthy_threshold", hchk.UnhealthyThreshold) d.Set("self_link", hchk.SelfLink) + d.Set("name", hchk.Name) + d.Set("description", hchk.Description) + d.Set("project", project) return nil } From 742089f10c3a30ebbf0020edff9053bc857d6a30 Mon Sep 17 00:00:00 2001 From: Kraig Amador Date: Wed, 10 Aug 2016 13:31:36 -0700 Subject: [PATCH 0658/1238] Fixing the certs, test now passes --- ...urce_aws_lb_ssl_negotiation_policy_test.go | 109 ++++++++---------- 1 file changed, 50 insertions(+), 59 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go index 58d948468..8244ce093 100644 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go @@ -161,78 +161,69 @@ resource "aws_iam_server_certificate" "test_cert" { name = "%s" certificate_body = < Date: Wed, 10 Aug 2016 16:34:21 -0400 Subject: [PATCH 0659/1238] Collapse nested if with an && --- terraform/context.go | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/terraform/context.go b/terraform/context.go index 8f30fd554..f60d5b570 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -363,16 +363,14 @@ func (c *Context) Input(mode InputMode) error { "Error asking for %s: %s", n, err) } - if value == "" { - if v.Required() { - // Redo if it is required, but abort if we keep getting - // blank entries - if retry > 2 { - return fmt.Errorf("missing required value for %q", n) - } - retry++ - continue + if value == "" && v.Required() { + // Redo if it is required, but abort if we keep getting + // blank entries + if retry > 2 { + return fmt.Errorf("missing required value for %q", n) } + retry++ + continue } break From c9de6a41738d7df9645cdaaa39307311679b09f4 Mon Sep 17 00:00:00 2001 From: Clint Date: Wed, 10 Aug 2016 15:34:38 -0500 Subject: [PATCH 0660/1238] provider/aws: Change a few policy test docs to use heredoc format, to prevent regressions (#8118) --- .../aws/resource_aws_iam_group_policy_test.go | 11 +++- .../aws/resource_aws_iam_role_policy_test.go | 65 +++++++++++++++++-- 2 files changed, 70 insertions(+), 6 deletions(-) diff --git a/builtin/providers/aws/resource_aws_iam_group_policy_test.go 
b/builtin/providers/aws/resource_aws_iam_group_policy_test.go index ccf35310b..8ca167b8a 100644 --- a/builtin/providers/aws/resource_aws_iam_group_policy_test.go +++ b/builtin/providers/aws/resource_aws_iam_group_policy_test.go @@ -111,7 +111,16 @@ resource "aws_iam_group" "group" { resource "aws_iam_group_policy" "foo" { name = "foo_policy" group = "${aws_iam_group.group.name}" - policy = "{\"Version\":\"2012-10-17\",\"Statement\":{\"Effect\":\"Allow\",\"Action\":\"*\",\"Resource\":\"*\"}}" + policy = < Date: Thu, 4 Aug 2016 13:12:09 -0400 Subject: [PATCH 0661/1238] provider/google: Support Import of 'google_compute_forwarding_rule' --- .../import_compute_forwarding_rule_test.go | 32 +++++++++++++++++++ .../resource_compute_forwarding_rule.go | 12 ++++++- 2 files changed, 43 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/google/import_compute_forwarding_rule_test.go diff --git a/builtin/providers/google/import_compute_forwarding_rule_test.go b/builtin/providers/google/import_compute_forwarding_rule_test.go new file mode 100644 index 000000000..cc6c0214e --- /dev/null +++ b/builtin/providers/google/import_compute_forwarding_rule_test.go @@ -0,0 +1,32 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeForwardingRule_importBasic(t *testing.T) { + resourceName := "google_compute_forwarding_rule.foobar" + poolName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + ruleName := fmt.Sprintf("tf-%s", acctest.RandString(10)) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeForwardingRuleDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeForwardingRule_basic(poolName, ruleName), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/resource_compute_forwarding_rule.go b/builtin/providers/google/resource_compute_forwarding_rule.go index 8f1634c44..194845aa1 100644 --- a/builtin/providers/google/resource_compute_forwarding_rule.go +++ b/builtin/providers/google/resource_compute_forwarding_rule.go @@ -15,6 +15,9 @@ func resourceComputeForwardingRule() *schema.Resource { Read: resourceComputeForwardingRuleRead, Delete: resourceComputeForwardingRuleDelete, Update: resourceComputeForwardingRuleUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -59,12 +62,14 @@ func resourceComputeForwardingRule() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -179,10 +184,15 @@ func resourceComputeForwardingRuleRead(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error reading ForwardingRule: %s", err) } + d.Set("name", frule.Name) + d.Set("target", frule.Target) + d.Set("description", frule.Description) + d.Set("port_range", frule.PortRange) + d.Set("project", project) + d.Set("region", region) d.Set("ip_address", frule.IPAddress) d.Set("ip_protocol", frule.IPProtocol) d.Set("self_link", frule.SelfLink) - return nil } From ec3e442f26a020d4058aa236df15aff461188932 Mon Sep 17 00:00:00 2001 From: stack72 Date: Thu, 11 Aug 2016 15:15:23 +1200 Subject: 
[PATCH 0662/1238] provider/aws: change the test to be us-west-2a --- .../aws/resource_aws_lb_ssl_negotiation_policy_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go index 8244ce093..8df23afe0 100644 --- a/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go +++ b/builtin/providers/aws/resource_aws_lb_ssl_negotiation_policy_test.go @@ -217,7 +217,7 @@ EOF } resource "aws_elb" "lb" { name = "test-lb" - availability_zones = ["us-east-1a"] + availability_zones = ["us-west-2a"] listener { instance_port = 8000 instance_protocol = "https" From 605dfada51c78c1a701eb13a847cccf49e02d30d Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 11 Aug 2016 15:20:06 +1200 Subject: [PATCH 0663/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 14ed945b7..d4c1e28b1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,7 @@ FEATURES: * **New Resource:** `aws_load_balancer_policy` [GH-7458] * **New Resource:** `aws_load_balancer_backend_server_policy` [GH-7458] * **New Resource:** `aws_load_balancer_listener_policy` [GH-7458] + * **New Resource:** `aws_lb_ssl_negotiation_policy` [GH-8084] * **New Data Source:** `aws_ip_ranges` [GH-7984] * **New Data Source:** `fastly_ip_ranges` [GH-7984] From d7285d1b14b84570e7aa5c3354517ffad07a105d Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Thu, 11 Aug 2016 05:00:23 +0100 Subject: [PATCH 0664/1238] provider/azurerm: fix; add tests for importing traffic manager resources (#8111) * provider/azurerm: add test for importing traffic manager profile resource TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMTrafficManagerProfile_import -timeout 120m === RUN TestAccAzureRMTrafficManagerProfile_importBasic --- PASS: TestAccAzureRMTrafficManagerProfile_importBasic (84.50s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 84.722s * provider/azurerm: fix; add test for importing traffic_manager_endpoint resource TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMTrafficManagerEndpoint -timeout 120m === RUN TestAccAzureRMTrafficManagerEndpoint_importBasic --- PASS: TestAccAzureRMTrafficManagerEndpoint_importBasic (130.66s) === RUN TestAccAzureRMTrafficManagerEndpoint_basic --- PASS: TestAccAzureRMTrafficManagerEndpoint_basic (152.87s) === RUN TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal --- PASS: TestAccAzureRMTrafficManagerEndpoint_basicDisableExternal (151.55s) === RUN TestAccAzureRMTrafficManagerEndpoint_updateWeight --- PASS: TestAccAzureRMTrafficManagerEndpoint_updateWeight (199.33s) === RUN TestAccAzureRMTrafficManagerEndpoint_updatePriority --- PASS: TestAccAzureRMTrafficManagerEndpoint_updatePriority (113.15s) === RUN TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints --- PASS: TestAccAzureRMTrafficManagerEndpoint_nestedEndpoints (97.05s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 844.740s --- ...mport_arm_traffic_manager_endpoint_test.go | 36 +++++++++++++++++++ ...import_arm_traffic_manager_profile_test.go | 34 ++++++++++++++++++ .../resource_arm_traffic_manager_endpoint.go | 13 ++++++- 3 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go create mode 100644 builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go diff --git 
a/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go b/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go new file mode 100644 index 000000000..d8778a2c4 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_traffic_manager_endpoint_test.go @@ -0,0 +1,36 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMTrafficManagerEndpoint_importBasic(t *testing.T) { + resourceName := "azurerm_traffic_manager_endpoint.testExternal" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerEndpoint_basic, ri, ri, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerEndpointDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "resource_group_name", + }, + }, + }, + }) +} diff --git a/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go b/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go new file mode 100644 index 000000000..08be702e4 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_traffic_manager_profile_test.go @@ -0,0 +1,34 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMTrafficManagerProfile_importBasic(t *testing.T) { + resourceName := "azurerm_traffic_manager_profile.test" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMTrafficManagerProfile_performance, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMTrafficManagerProfileDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_group_name"}, + }, + }, + }) +} diff --git a/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go index 922db271f..f4d1c4b68 100644 --- a/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go +++ b/builtin/providers/azurerm/resource_arm_traffic_manager_endpoint.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "net/http" + "regexp" "github.com/Azure/azure-sdk-for-go/arm/trafficmanager" "github.com/hashicorp/terraform/helper/schema" @@ -136,7 +137,15 @@ func resourceArmTrafficManagerEndpointRead(d *schema.ResourceData, meta interfac return err } resGroup := id.ResourceGroup - endpointType := d.Get("type").(string) + + // lookup endpointType in Azure ID path + var endpointType string + typeRegex := regexp.MustCompile("azureEndpoints|externalEndpoints|nestedEndpoints") + for k := range id.Path { + if typeRegex.MatchString(k) { + endpointType = k + } + } profileName := id.Path["trafficManagerProfiles"] // endpoint name is keyed by endpoint type in ARM ID @@ -153,6 +162,8 @@ func resourceArmTrafficManagerEndpointRead(d *schema.ResourceData, meta interfac endpoint := *resp.Properties d.Set("name", resp.Name) + d.Set("type", endpointType) + d.Set("profile_name", profileName) d.Set("endpoint_status", 
endpoint.EndpointStatus) d.Set("target_resource_id", endpoint.TargetResourceID) d.Set("target", endpoint.Target) From 66a14cb3b732bfd2f41d1672a01758f19dfba9a9 Mon Sep 17 00:00:00 2001 From: Raphael Randschau Date: Thu, 11 Aug 2016 12:49:59 +0200 Subject: [PATCH 0665/1238] provider/aws: Re-implement api gateway parameter handling (#7794) * provider/aws: Re-implement api gateway parameter handling this PR cleans up some left overs from PR #4295, namely the parameter handling. now that GH-2143 is finally closed this PR does away with the ugly `request_parameters_in_json` and `response_parameters_in_json` hack. * Add deprecation message and conflictsWith settings following @radeksimko s advice, keeping the old code around with a deprecation warning. this should be cleaned up in a few releases * provider/aws: fix missing append operation * provider/aws: mark old parameters clearly as deprecated * provider/aws work around #8104 following @radeksimko s lead * provider/aws fix cnp error --- .../resource_aws_api_gateway_integration.go | 23 ++++++-- ...ce_aws_api_gateway_integration_response.go | 30 +++++++--- ...s_api_gateway_integration_response_test.go | 18 ++---- ...source_aws_api_gateway_integration_test.go | 12 ++-- .../aws/resource_aws_api_gateway_method.go | 44 +++++++++++++-- ...esource_aws_api_gateway_method_response.go | 45 ++++++++++++--- ...ce_aws_api_gateway_method_response_test.go | 13 ++--- .../resource_aws_api_gateway_method_test.go | 14 ++--- builtin/providers/aws/structure.go | 56 ++++++++++++++++++- .../r/api_gateway_integration.html.markdown | 7 +-- ...gateway_integration_response.html.markdown | 6 +- .../aws/r/api_gateway_method.html.markdown | 6 +- .../api_gateway_method_response.html.markdown | 5 +- 13 files changed, 201 insertions(+), 78 deletions(-) diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration.go b/builtin/providers/aws/resource_aws_api_gateway_integration.go index 2cb0c9818..c745ef3b6 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration.go @@ -75,9 +75,18 @@ func resourceAwsApiGatewayIntegration() *schema.Resource { Elem: schema.TypeString, }, + "request_parameters": &schema.Schema{ + Type: schema.TypeMap, + Elem: schema.TypeString, + Optional: true, + ConflictsWith: []string{"request_parameters_in_json"}, + }, + "request_parameters_in_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"request_parameters"}, + Deprecated: "Use field request_parameters instead", }, "passthrough_behavior": &schema.Schema{ @@ -107,6 +116,12 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa } parameters := make(map[string]string) + if kv, ok := d.GetOk("request_parameters"); ok { + for k, v := range kv.(map[string]interface{}) { + parameters[k] = v.(string) + } + } + if v, ok := d.GetOk("request_parameters_in_json"); ok { if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { return fmt.Errorf("Error unmarshaling request_parameters_in_json: %s", err) @@ -129,8 +144,7 @@ func resourceAwsApiGatewayIntegrationCreate(d *schema.ResourceData, meta interfa RestApiId: aws.String(d.Get("rest_api_id").(string)), Type: aws.String(d.Get("type").(string)), IntegrationHttpMethod: integrationHttpMethod, - Uri: uri, - // TODO reimplement once [GH-2143](https://github.com/hashicorp/terraform/issues/2143) has been implemented + Uri: uri, RequestParameters: 
aws.StringMap(parameters), RequestTemplates: aws.StringMap(templates), Credentials: credentials, @@ -175,6 +189,7 @@ func resourceAwsApiGatewayIntegrationRead(d *schema.ResourceData, meta interface d.Set("credentials", integration.Credentials) d.Set("type", integration.Type) d.Set("uri", integration.Uri) + d.Set("request_parameters", aws.StringValueMap(integration.RequestParameters)) d.Set("request_parameters_in_json", aws.StringValueMap(integration.RequestParameters)) d.Set("passthrough_behavior", integration.PassthroughBehavior) diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration_response.go b/builtin/providers/aws/resource_aws_api_gateway_integration_response.go index e5f17abd7..c507b3473 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration_response.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration_response.go @@ -56,9 +56,18 @@ func resourceAwsApiGatewayIntegrationResponse() *schema.Resource { Elem: schema.TypeString, }, + "response_parameters": &schema.Schema{ + Type: schema.TypeMap, + Elem: schema.TypeString, + Optional: true, + ConflictsWith: []string{"response_parameters_in_json"}, + }, + "response_parameters_in_json": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"response_parameters"}, + Deprecated: "Use field response_parameters instead", }, }, } @@ -73,6 +82,11 @@ func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta } parameters := make(map[string]string) + if kv, ok := d.GetOk("response_parameters"); ok { + for k, v := range kv.(map[string]interface{}) { + parameters[k] = v.(string) + } + } if v, ok := d.GetOk("response_parameters_in_json"); ok { if err := json.Unmarshal([]byte(v.(string)), ¶meters); err != nil { return fmt.Errorf("Error unmarshaling response_parameters_in_json: %s", err) @@ -80,12 +94,11 @@ func resourceAwsApiGatewayIntegrationResponseCreate(d *schema.ResourceData, meta } input := apigateway.PutIntegrationResponseInput{ - HttpMethod: aws.String(d.Get("http_method").(string)), - ResourceId: aws.String(d.Get("resource_id").(string)), - RestApiId: aws.String(d.Get("rest_api_id").(string)), - StatusCode: aws.String(d.Get("status_code").(string)), - ResponseTemplates: aws.StringMap(templates), - // TODO reimplement once [GH-2143](https://github.com/hashicorp/terraform/issues/2143) has been implemented + HttpMethod: aws.String(d.Get("http_method").(string)), + ResourceId: aws.String(d.Get("resource_id").(string)), + RestApiId: aws.String(d.Get("rest_api_id").(string)), + StatusCode: aws.String(d.Get("status_code").(string)), + ResponseTemplates: aws.StringMap(templates), ResponseParameters: aws.StringMap(parameters), } if v, ok := d.GetOk("selection_pattern"); ok { @@ -125,6 +138,7 @@ func resourceAwsApiGatewayIntegrationResponseRead(d *schema.ResourceData, meta i d.SetId(fmt.Sprintf("agir-%s-%s-%s-%s", d.Get("rest_api_id").(string), d.Get("resource_id").(string), d.Get("http_method").(string), d.Get("status_code").(string))) d.Set("response_templates", integrationResponse.ResponseTemplates) d.Set("selection_pattern", integrationResponse.SelectionPattern) + d.Set("response_parameters", aws.StringValueMap(integrationResponse.ResponseParameters)) d.Set("response_parameters_in_json", aws.StringValueMap(integrationResponse.ResponseParameters)) return nil } diff --git a/builtin/providers/aws/resource_aws_api_gateway_integration_response_test.go 
b/builtin/providers/aws/resource_aws_api_gateway_integration_response_test.go index 7220bef17..aba246730 100644 --- a/builtin/providers/aws/resource_aws_api_gateway_integration_response_test.go +++ b/builtin/providers/aws/resource_aws_api_gateway_integration_response_test.go @@ -185,11 +185,9 @@ resource "aws_api_gateway_method_response" "error" { "application/json" = "Error" } - response_parameters_in_json = < Date: Thu, 11 Aug 2016 12:58:20 +0200 Subject: [PATCH 0666/1238] Support setting datacenter when using consul remote state (#8102) * Support setting datacenter when using consul remote state Change-Id: I8c03f4058e9373f0de8fde7ce291ec552321cc60 * Add documentation for setting datacenter when using consul remote state Change-Id: Ia62feea7a910a76308f0a5e7f9505c9a210e0339 --- state/remote/consul.go | 3 +++ website/source/docs/state/remote/consul.html.md | 1 + 2 files changed, 4 insertions(+) diff --git a/state/remote/consul.go b/state/remote/consul.go index 8ac686f24..0db6e29fa 100644 --- a/state/remote/consul.go +++ b/state/remote/consul.go @@ -24,6 +24,9 @@ func consulFactory(conf map[string]string) (Client, error) { if scheme, ok := conf["scheme"]; ok && scheme != "" { config.Scheme = scheme } + if datacenter, ok := conf["datacenter"]; ok && datacenter != "" { + config.Datacenter = datacenter + } if auth, ok := conf["http_auth"]; ok && auth != "" { var username, password string if strings.Contains(auth, ":") { diff --git a/website/source/docs/state/remote/consul.html.md b/website/source/docs/state/remote/consul.html.md index 1e4422d3b..fbcb6ea46 100644 --- a/website/source/docs/state/remote/consul.html.md +++ b/website/source/docs/state/remote/consul.html.md @@ -44,5 +44,6 @@ The following configuration options / environment variables are supported: * `scheme` - (Optional) Specifies what protocol to use when talking to the given `address`, either `http` or `https`. SSL support can also be triggered by setting then environment variable `CONSUL_HTTP_SSL` to `true`. + * `datacenter` - (Optional) The datacenter to use. Defaults to that of the agent. * `http_auth` / `CONSUL_HTTP_AUTH` - (Optional) HTTP Basic Authentication credentials to be used when communicating with Consul, in the format of either `user` or `user:pass`. 
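The `datacenter` option added in the patch above is read straight from the remote-state backend's configuration map, so it can be supplied wherever that map is built. A minimal sketch, assuming the `terraform_remote_state` data source of this release; the address, path, and datacenter values are placeholders, not part of the patch:

```
# Reads state stored in Consul, pinned to a specific datacenter
# (all values below are illustrative placeholders).
data "terraform_remote_state" "network" {
  backend = "consul"

  config {
    address    = "consul.example.com:8500" # maps to conf["address"]
    path       = "tf/networking/state"     # key under which the state is stored
    datacenter = "dc1"                     # new option wired up in consulFactory above
  }
}
```

If `datacenter` is omitted, the client keeps the Consul default and uses the local agent's datacenter, matching the documented behaviour ("Defaults to that of the agent").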
From 78b4923f7d4d28d6e0e5acd8d8e1fccd361641b3 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Thu, 11 Aug 2016 20:59:31 +1000 Subject: [PATCH 0667/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d4c1e28b1..74e4fb7cb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ IMPROVEMENTS * provider/openstack: Support pdating the External Gateway assigned to a Neutron router [GH-8070] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] + * remote/consul: Support setting datacenter when using consul remote state [GH-8102] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 69f8a03dddea86208f3174cbec9e22c552b71c47 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 11 Aug 2016 12:10:53 +0100 Subject: [PATCH 0668/1238] aws/docs: Fix a few nitpicks after merging #7794 --- .../providers/aws/r/api_gateway_integration.html.markdown | 3 ++- .../aws/r/api_gateway_integration_response.html.markdown | 2 +- .../docs/providers/aws/r/api_gateway_method.html.markdown | 4 ++-- .../providers/aws/r/api_gateway_method_response.html.markdown | 4 ++-- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown index 550f17214..733544ee5 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown @@ -56,6 +56,7 @@ The following arguments are supported: Not all methods are compatible with all `AWS` integrations. e.g. Lambda function [can only be invoked](https://github.com/awslabs/aws-apigateway-importer/issues/9#issuecomment-129651005) via `POST`. * `request_templates` - (Optional) A map of the integration's request templates. -* `request_parameters` - (Optional) Request query string parameters and headers that should be passed to the +* `request_parameters` - (Optional) A map of request query string parameters and headers that should be passed to the backend responder. + For example: `request_parameters = { "integration.request.header.X-Some-Other-Header" = "method.request.header.X-Some-Header" }` * `passthrough_behavior` - (Optional) The integration passthrough behavior (`WHEN_NO_MATCH`, `WHEN_NO_TEMPLATES`, `NEVER`). **Required** if `request_templates` is used. * `request_parameters_in_json` - **Deprecated**, use `request_parameters` instead. diff --git a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown index a9348d9e9..431e2d62f 100644 --- a/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_integration_response.html.markdown @@ -69,6 +69,6 @@ The following arguments are supported: If the backend is an `AWS` Lambda function, the AWS Lambda function error header is matched. For all other `HTTP` and `AWS` backends, the HTTP status code is matched. 
* `response_templates` - (Optional) A map specifying the templates used to transform the integration response body -* `response_parameters` - (Optional) Specify the response parameters that can be read from the backend response +* `response_parameters` - (Optional) A map of response parameters that can be read from the backend response. For example: `response_parameters = { "method.response.header.X-Some-Header" = "integration.response.header.X-Some-Other-Header" }`, * `response_parameters_in_json` - **Deprecated**, use `response_parameters` instead. diff --git a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown index d79fcf9c0..7d1d097ed 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown @@ -44,7 +44,7 @@ The following arguments are supported: * `request_models` - (Optional) A map of the API models used for the request's content type where key is the content type (e.g. `application/json`) and value is either `Error`, `Empty` (built-in models) or `aws_api_gateway_model`'s `name`. -* `request_parameters` - (Optional) Specify which request query string parameters and headers that should be passed to the integration +* `request_parameters` - (Optional) A map of request query string parameters and headers that should be passed to the integration. For example: `request_parameters = { "method.request.header.X-Some-Header" = true }` - would define that the header X-Some-Header must be provided on the request. + would define that the header `X-Some-Header` must be provided on the request. * `request_parameters_in_json` - **Deprecated**, use `request_parameters` instead. diff --git a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown index 2c836696a..30b925e1c 100644 --- a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown +++ b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown @@ -55,7 +55,7 @@ The following arguments are supported: * `http_method` - (Required) The HTTP Method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTION`) * `status_code` - (Required) The HTTP status code * `response_models` - (Optional) A map of the API models used for the response's content type -* `response_parameters` - (Optional) Response parameters that can be sent to the caller +* `response_parameters` - (Optional) A map of response parameters that can be sent to the caller. For example: `response_parameters = { "method.response.header.X-Some-Header" = true }` - would define that the header X-Some-Header can be provided on the response. + would define that the header `X-Some-Header` can be provided on the response. * `response_parameters_in_json` - **Deprecated**, use `response_parameters` instead. 
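Read together with the provider change in PATCH 0665, the corrected docs describe the new map form of the parameters. A brief sketch of both sides of a method using the map attributes; the resource names and the `X-Some-Header` header are placeholders taken from the doc examples, not a definitive configuration:

```
resource "aws_api_gateway_method" "example" {
  rest_api_id   = "${aws_api_gateway_rest_api.example.id}"
  resource_id   = "${aws_api_gateway_resource.example.id}"
  http_method   = "GET"
  authorization = "NONE"

  # Map form replaces the deprecated request_parameters_in_json string.
  request_parameters = {
    "method.request.header.X-Some-Header" = true
  }
}

resource "aws_api_gateway_method_response" "ok" {
  rest_api_id = "${aws_api_gateway_rest_api.example.id}"
  resource_id = "${aws_api_gateway_resource.example.id}"
  http_method = "${aws_api_gateway_method.example.http_method}"
  status_code = "200"

  # Map form replaces the deprecated response_parameters_in_json string.
  response_parameters = {
    "method.response.header.X-Some-Header" = true
  }
}
```

Note that the schema marks `request_parameters`/`response_parameters` as conflicting with their `*_in_json` counterparts, so only one form may be set on a given resource during the deprecation period.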
From 97fcd5f4cc9074642a91c27634fb8f85a9d5af2b Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Thu, 11 Aug 2016 12:13:35 +0100 Subject: [PATCH 0669/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 74e4fb7cb..b55d47359 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,7 @@ IMPROVEMENTS * provider/aws: Add ability to set Requests Payer in `aws_s3_bucket` [GH-8065] * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091] * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check [GH-7874] + * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated [GH-7794] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From 1ff6f8ccf4921eed452b2530ed6ac7e69dbdfe11 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Tue, 2 Aug 2016 13:43:32 -0400 Subject: [PATCH 0670/1238] provider/google: Support Import of 'google_compute_target_pool' --- .../google/import_compute_target_pool_test.go | 28 +++++++++++++++ .../google/resource_compute_target_pool.go | 36 ++++++++++++++++--- 2 files changed, 59 insertions(+), 5 deletions(-) create mode 100644 builtin/providers/google/import_compute_target_pool_test.go diff --git a/builtin/providers/google/import_compute_target_pool_test.go b/builtin/providers/google/import_compute_target_pool_test.go new file mode 100644 index 000000000..9d3e70322 --- /dev/null +++ b/builtin/providers/google/import_compute_target_pool_test.go @@ -0,0 +1,28 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeTargetPool_importBasic(t *testing.T) { + resourceName := "google_compute_target_pool.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeTargetPoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeTargetPool_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/resource_compute_target_pool.go b/builtin/providers/google/resource_compute_target_pool.go index b49ca4251..1eed09337 100644 --- a/builtin/providers/google/resource_compute_target_pool.go +++ b/builtin/providers/google/resource_compute_target_pool.go @@ -16,6 +16,9 @@ func resourceComputeTargetPool() *schema.Resource { Read: resourceComputeTargetPoolRead, Delete: resourceComputeTargetPoolDelete, Update: resourceComputeTargetPoolUpdate, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -60,12 +63,14 @@ func resourceComputeTargetPool() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "self_link": &schema.Schema{ @@ -106,7 +111,7 @@ func convertHealthChecks(config *Config, project string, names []string) ([]stri // Instances do not need to exist yet, so we simply generate URLs. 
// Instances can be full URLS or zone/name -func convertInstances(config *Config, project string, names []string) ([]string, error) { +func convertInstancesToUrls(config *Config, project string, names []string) ([]string, error) { urls := make([]string, len(names)) for i, name := range names { if strings.HasPrefix(name, "https://www.googleapis.com/compute/v1/") { @@ -144,7 +149,7 @@ func resourceComputeTargetPoolCreate(d *schema.ResourceData, meta interface{}) e return err } - instanceUrls, err := convertInstances( + instanceUrls, err := convertInstancesToUrls( config, project, convertStringArr(d.Get("instances").([]interface{}))) if err != nil { return err @@ -279,11 +284,11 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e from_, to_ := d.GetChange("instances") from := convertStringArr(from_.([]interface{})) to := convertStringArr(to_.([]interface{})) - fromUrls, err := convertInstances(config, project, from) + fromUrls, err := convertInstancesToUrls(config, project, from) if err != nil { return err } - toUrls, err := convertInstances(config, project, to) + toUrls, err := convertInstancesToUrls(config, project, to) if err != nil { return err } @@ -346,6 +351,16 @@ func resourceComputeTargetPoolUpdate(d *schema.ResourceData, meta interface{}) e return resourceComputeTargetPoolRead(d, meta) } +func convertInstancesFromUrls(urls []string) []string { + result := make([]string, 0, len(urls)) + for _, url := range urls { + urlArray := strings.Split(url, "/") + instance := fmt.Sprintf("%s/%s", urlArray[len(urlArray)-3], urlArray[len(urlArray)-1]) + result = append(result, instance) + } + return result +} + func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) @@ -373,8 +388,19 @@ func resourceComputeTargetPoolRead(d *schema.ResourceData, meta interface{}) err return fmt.Errorf("Error reading TargetPool: %s", err) } + regionUrl := strings.Split(tpool.Region, "/") d.Set("self_link", tpool.SelfLink) - + d.Set("backup_pool", tpool.BackupPool) + d.Set("description", tpool.Description) + d.Set("failover_ratio", tpool.FailoverRatio) + d.Set("health_checks", tpool.HealthChecks) + if tpool.Instances != nil { + d.Set("instances", convertInstancesFromUrls(tpool.Instances)) + } + d.Set("name", tpool.Name) + d.Set("region", regionUrl[len(regionUrl)-1]) + d.Set("session_affinity", tpool.SessionAffinity) + d.Set("project", project) return nil } From 03bc37edcb00c80bae8e65e3953cba782f86f21d Mon Sep 17 00:00:00 2001 From: Michael Blakeley Date: Thu, 11 Aug 2016 09:32:54 -0700 Subject: [PATCH 0671/1238] apostrophe police Sorry, my OCD kicked in. Use `it's` when you mean `it is`. Use `its` when you mean something belongs to `it`. --- website/source/docs/providers/aws/r/db_instance.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/source/docs/providers/aws/r/db_instance.html.markdown b/website/source/docs/providers/aws/r/db_instance.html.markdown index 234a97985..66ed8c384 100644 --- a/website/source/docs/providers/aws/r/db_instance.html.markdown +++ b/website/source/docs/providers/aws/r/db_instance.html.markdown @@ -14,7 +14,7 @@ databases. Changes to a DB instance can occur when you manually change a parameter, such as `allocated_storage`, and are reflected in the next maintenance -window. Because of this, Terraform may report a difference in it's planning +window. 
Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately (see documentation below). @@ -148,4 +148,4 @@ DB Instances can be imported using the `identifier`, e.g. ``` $ terraform import aws_db_instance.default mydb-rds-instance -``` \ No newline at end of file +``` From fd055b700dcfd085c4a1e4beccc4f1a62a2fd07e Mon Sep 17 00:00:00 2001 From: Michael Blakeley Date: Thu, 11 Aug 2016 09:34:00 -0700 Subject: [PATCH 0672/1238] apostrophe police --- website/source/docs/providers/aws/r/rds_cluster.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/r/rds_cluster.html.markdown b/website/source/docs/providers/aws/r/rds_cluster.html.markdown index eef19cff0..2e605f677 100644 --- a/website/source/docs/providers/aws/r/rds_cluster.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster.html.markdown @@ -17,7 +17,7 @@ For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amaz Changes to a RDS Cluster can occur when you manually change a parameter, such as `port`, and are reflected in the next maintenance -window. Because of this, Terraform may report a difference in it's planning +window. Because of this, Terraform may report a difference in its planning phase because a modification has not yet taken place. You can use the `apply_immediately` flag to instruct the service to apply the change immediately (see documentation below). From 79fa978896fb4ebd6562c75b33d44382c867eb6b Mon Sep 17 00:00:00 2001 From: Aaron Welch Date: Thu, 11 Aug 2016 09:40:23 -0700 Subject: [PATCH 0673/1238] working volume resource and test, website docs updated --- .../packet/resource_packet_volume.go | 97 ++++++++++++++----- .../packet/resource_packet_volume_test.go | 1 + .../docs/providers/packet/r/volume.html | 60 ++++++++++++ website/source/layouts/packet.erb | 3 + 4 files changed, 138 insertions(+), 23 deletions(-) create mode 100644 website/source/docs/providers/packet/r/volume.html diff --git a/builtin/providers/packet/resource_packet_volume.go b/builtin/providers/packet/resource_packet_volume.go index d212a1561..c5dc0a887 100644 --- a/builtin/providers/packet/resource_packet_volume.go +++ b/builtin/providers/packet/resource_packet_volume.go @@ -1,8 +1,11 @@ package packet import ( + "errors" "fmt" + "time" + "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" "github.com/packethost/packngo" ) @@ -39,9 +42,7 @@ func resourcePacketVolume() *schema.Resource { "size": &schema.Schema{ Type: schema.TypeInt, - Required: false, - Optional: true, - Computed: true, + Required: true, }, "facility": &schema.Schema{ @@ -53,13 +54,12 @@ func resourcePacketVolume() *schema.Resource { "plan": &schema.Schema{ Type: schema.TypeString, Required: true, - ForceNew: true, }, "billing_cycle": &schema.Schema{ Type: schema.TypeString, - Required: true, - ForceNew: true, + Computed: true, + Optional: true, }, "state": &schema.Schema{ @@ -69,7 +69,7 @@ func resourcePacketVolume() *schema.Resource { "locked": &schema.Schema{ Type: schema.TypeBool, - Computed: true, + Optional: true, }, "snapshot_policies": &schema.Schema{ @@ -121,26 +121,30 @@ func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error client := meta.(*packngo.Client) createRequest := &packngo.VolumeCreateRequest{ - PlanID: d.Get("plan").(string), - FacilityID: 
d.Get("facility").(string), - BillingCycle: d.Get("billing_cycle").(string), - ProjectID: d.Get("project_id").(string), + PlanID: d.Get("plan").(string), + FacilityID: d.Get("facility").(string), + ProjectID: d.Get("project_id").(string), + Size: d.Get("size").(int), + } + + if attr, ok := d.GetOk("billing_cycle"); ok { + createRequest.BillingCycle = attr.(string) + } else { + createRequest.BillingCycle = "hourly" } if attr, ok := d.GetOk("description"); ok { createRequest.Description = attr.(string) } - if attr, ok := d.GetOk("size"); ok { - createRequest.Size = attr.(int) - } - - snapshot_policies := d.Get("snapshot_policies.#").(int) - if snapshot_policies > 0 { - createRequest.SnapshotPolicies = make([]*packngo.SnapshotPolicy, 0, snapshot_policies) - for i := 0; i < snapshot_policies; i++ { - key := fmt.Sprintf("snapshot_policies.%d", i) - createRequest.SnapshotPolicies = append(createRequest.SnapshotPolicies, d.Get(key).(*packngo.SnapshotPolicy)) + snapshot_count := d.Get("snapshot_policies.#").(int) + if snapshot_count > 0 { + createRequest.SnapshotPolicies = make([]*packngo.SnapshotPolicy, 0, snapshot_count) + for i := 0; i < snapshot_count; i++ { + policy := new(packngo.SnapshotPolicy) + policy.SnapshotFrequency = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_frequency", i)).(string) + policy.SnapshotCount = d.Get(fmt.Sprintf("snapshot_policies.%d.snapshot_count", i)).(int) + createRequest.SnapshotPolicies = append(createRequest.SnapshotPolicies, policy) } } @@ -151,9 +155,52 @@ func resourcePacketVolumeCreate(d *schema.ResourceData, meta interface{}) error d.SetId(newVolume.ID) + _, err = waitForVolumeAttribute(d, "active", []string{"queued", "provisioning"}, "state", meta) + if err != nil { + if isForbidden(err) { + // If the volume doesn't get to the active state, we can't recover it from here. 
+ d.SetId("") + + return errors.New("provisioning time limit exceeded; the Packet team will investigate") + } + return err + } + return resourcePacketVolumeRead(d, meta) } +func waitForVolumeAttribute(d *schema.ResourceData, target string, pending []string, attribute string, meta interface{}) (interface{}, error) { + stateConf := &resource.StateChangeConf{ + Pending: pending, + Target: []string{target}, + Refresh: newVolumeStateRefreshFunc(d, attribute, meta), + Timeout: 60 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + return stateConf.WaitForState() +} + +func newVolumeStateRefreshFunc(d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc { + client := meta.(*packngo.Client) + + return func() (interface{}, string, error) { + if err := resourcePacketVolumeRead(d, meta); err != nil { + return nil, "", err + } + + if attr, ok := d.GetOk(attribute); ok { + volume, _, err := client.Volumes.Get(d.Id()) + if err != nil { + return nil, "", friendlyError(err) + } + return &volume, attr.(string), nil + } + + return nil, "", nil + } +} + func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { client := meta.(*packngo.Client) @@ -181,9 +228,13 @@ func resourcePacketVolumeRead(d *schema.ResourceData, meta interface{}) error { d.Set("created", volume.Created) d.Set("updated", volume.Updated) - snapshot_policies := make([]*packngo.SnapshotPolicy, 0, len(volume.SnapshotPolicies)) + snapshot_policies := make([]map[string]interface{}, 0, len(volume.SnapshotPolicies)) for _, snapshot_policy := range volume.SnapshotPolicies { - snapshot_policies = append(snapshot_policies, snapshot_policy) + policy := map[string]interface{}{ + "snapshot_frequency": snapshot_policy.SnapshotFrequency, + "snapshot_count": snapshot_policy.SnapshotCount, + } + snapshot_policies = append(snapshot_policies, policy) } d.Set("snapshot_policies", snapshot_policies) diff --git a/builtin/providers/packet/resource_packet_volume_test.go b/builtin/providers/packet/resource_packet_volume_test.go index 1cf316a72..cffd55f13 100644 --- a/builtin/providers/packet/resource_packet_volume_test.go +++ b/builtin/providers/packet/resource_packet_volume_test.go @@ -99,4 +99,5 @@ resource "packet_volume" "foobar" { size = 100 project_id = "%s" facility = "%s" + snapshot_policies = { snapshot_frequency = "1day", snapshot_count = 7 } }` diff --git a/website/source/docs/providers/packet/r/volume.html b/website/source/docs/providers/packet/r/volume.html new file mode 100644 index 000000000..3179518ea --- /dev/null +++ b/website/source/docs/providers/packet/r/volume.html @@ -0,0 +1,60 @@ +--- +layout: "packet" +page_title: "Packet: packet_volume" +sidebar_current: "docs-packet-resource-volume" +description: |- + Provides a Packet Block Storage Volume Resource. +--- + +# packet\_volume + +Provides a Packet Block Storage Volume resource to allow you to +manage block volumes on your account. +Once created by Terraform, they must then be attached and mounted +using the api and `packet_block_attach` and `packet_block_detach` +scripts. 
+ +## Example Usage + +``` +# Create a new block volume +resource "packet_volume" "volume1" { + description = "terraform-volume-1" + facility = "ewr1" + project_id = "${packet_project.cool_project.id}" + plan = 'storage_1' + size = 100 + billing_cycle = "hourly" + snapshot_policies = { snapshot_frequency = "1day", snapshot_count = 7 } + snapshot_policies = { snapshot_frequency = "1month", snapshot_count = 6 } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `plan` - (Required) The service plan slug of the volume +* `facility` - (Required) The facility to create the volume in +* `project_id` - (Required) The packet project ID to deploy the volume in +* `size` - (Required) The size in GB to make the volume +* `billing_cycle` - The billing cycle, defaults to "hourly" +* `description` - Optional description for the volume +* `snapshot_policies` - Optional list of snapshot policies + +## Attributes Reference + +The following attributes are exported: + +* `id` - The unique ID of the volume +* `name` - The name of the volume +* `description` - The description of the volume +* `size` - The size in GB of the volume +* `plan` - Performance plan the volume is on +* `billing_cycle` - The billing cycle, defaults to hourly +* `facility` - The facility slug the volume resides in +* `state` - The state of the volume +* `locked` - Whether the volume is locked or not +* `project_id ` - The project id the volume is in +* `created` - The timestamp for when the volume was created +* `updated` - The timestamp for the last time the volume was updated diff --git a/website/source/layouts/packet.erb b/website/source/layouts/packet.erb index f7464f198..8591e23e3 100644 --- a/website/source/layouts/packet.erb +++ b/website/source/layouts/packet.erb @@ -22,6 +22,9 @@ > packet_ssh_key + > + packet_volume + From c38884912d658b605bef7c0e81b9f40f9f6766a8 Mon Sep 17 00:00:00 2001 From: James Nugent Date: Thu, 11 Aug 2016 13:01:51 -0400 Subject: [PATCH 0674/1238] deps: github.com/aws/aws-sdk-go/... to v1.4.1 --- .../github.com/aws/aws-sdk-go/aws/config.go | 118 ++- .../aws/aws-sdk-go/aws/defaults/defaults.go | 2 +- .../aws/aws-sdk-go/aws/session/session.go | 39 +- .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../aws-sdk-go/private/endpoints/endpoints.go | 19 +- .../private/endpoints/endpoints.json | 3 + .../private/endpoints/endpoints_map.go | 3 + .../aws/aws-sdk-go/service/autoscaling/api.go | 935 ++++++++++++++---- .../aws/aws-sdk-go/service/cloudfront/api.go | 821 ++++++++++++++- .../aws-sdk-go/service/cloudfront/service.go | 8 +- .../aws-sdk-go/service/ec2/customizations.go | 6 +- .../aws/aws-sdk-go/service/ecr/api.go | 43 +- .../aws/aws-sdk-go/service/ecs/api.go | 70 +- .../aws/aws-sdk-go/service/elb/api.go | 373 ++++--- .../aws/aws-sdk-go/service/elb/service.go | 29 +- .../aws/aws-sdk-go/service/kms/api.go | 519 +++++++++- .../aws/aws-sdk-go/service/rds/waiters.go | 12 - .../service/s3/host_style_bucket.go | 8 + vendor/vendor.json | 298 +++--- 19 files changed, 2649 insertions(+), 659 deletions(-) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 16647c808..fca922584 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -7,24 +7,36 @@ import ( "github.com/aws/aws-sdk-go/aws/credentials" ) -// UseServiceDefaultRetries instructs the config to use the service's own default -// number of retries. 
This will be the default action if Config.MaxRetries -// is nil also. +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. const UseServiceDefaultRetries = -1 -// RequestRetryer is an alias for a type that implements the request.Retryer interface. +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, -// all clients will use the {defaults.DefaultConfig} structure. +// all clients will use the defaults.DefaultConfig tructure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess, err := session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// }) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) type Config struct { // Enables verbose error printing of all credential chain errors. - // Should be used when wanting to see all errors while attempting to retrieve - // credentials. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. CredentialsChainVerboseErrors *bool - // The credentials object to use when signing requests. Defaults to - // a chain of credential providers to search for credentials in environment + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment // variables, shared credential file, and EC2 Instance Roles. Credentials *credentials.Credentials @@ -63,11 +75,12 @@ type Config struct { Logger Logger // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service specific - // configuration. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. MaxRetries *int - // Retryer guides how HTTP requests should be retried in case of recoverable failures. + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. // // When nil or the value does not implement the request.Retryer interface, // the request.DefaultRetryer will be used. @@ -82,8 +95,8 @@ type Config struct { // Retryer RequestRetryer - // Disables semantic parameter validation, which validates input for missing - // required fields and/or other semantic request input errors. + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. DisableParamValidation *bool // Disables the computation of request and response checksums, e.g., @@ -91,8 +104,8 @@ type Config struct { DisableComputeChecksums *bool // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will - // use virtual hosted bucket addressing when possible + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible // (`http://BUCKET.s3.amazonaws.com/KEY`). // // @note This configuration option is specific to the Amazon S3 service. @@ -109,26 +122,31 @@ type Config struct { // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html // // 100-Continue is only enabled for Go 1.6 and above. 
See `http.Transport`'s - // `ExpectContinueTimeout` for information on adjusting the continue wait timeout. - // https://golang.org/pkg/net/http/#Transport + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport // // You should use this flag to disble 100-Continue if you experience issues // with proxies or third party S3 compatible services. S3Disable100Continue *bool - // Set this to `true` to enable S3 Accelerate feature. For all operations compatible - // with S3 Accelerate will use the accelerate endpoint for requests. Requests not compatible - // will fall back to normal S3 requests. + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. // - // The bucket must be enable for accelerate to be used with S3 client with accelerate - // enabled. If the bucket is not enabled for accelerate an error will be returned. - // The bucket name must be DNS compatible to also work with accelerate. + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + // + // Not compatible with UseDualStack requests will fail if both flags are + // specified. S3UseAccelerate *bool // Set this to `true` to disable the EC2Metadata client from overriding the - // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata - // client to create a new http.Client. This options is only meaningful if you're not - // already using a custom HTTP client with the SDK. Enabled by default. + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. // // Must be set and provided to the session.NewSession() in order to disable // the EC2Metadata overriding the timeout for default credentials chain. @@ -140,6 +158,27 @@ type Config struct { // EC2MetadataDisableTimeoutOverride *bool + // Instructs the endpiont to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. + // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess, err := session.NewSession() + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + // SleepDelay is an override for the func the SDK will call when sleeping // during the lifecycle of a request. Specifically this will be used for // request delays. This value should only be used for testing. To adjust @@ -148,11 +187,19 @@ type Config struct { SleepDelay func(time.Duration) } -// NewConfig returns a new Config pointer that can be chained with builder methods to -// set multiple configuration values inline without using pointers. 
+// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. // -// sess, err := session.NewSession(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess, err := session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// ) // +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) func NewConfig() *Config { return &Config{} } @@ -255,6 +302,13 @@ func (c *Config) WithS3UseAccelerate(enable bool) *Config { return c } +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + // WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value // returning a Config pointer for chaining. func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { @@ -341,6 +395,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3UseAccelerate = other.S3UseAccelerate } + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + if other.EC2MetadataDisableTimeoutOverride != nil { dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index dccbafbfc..10b7d8649 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -120,7 +120,7 @@ func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) cred func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider { endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, - aws.StringValue(cfg.Region), true) + aws.StringValue(cfg.Region), true, false) return &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.NewClient(cfg, handlers, endpoint, signingRegion), diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 1abe39a3a..2374b1f27 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -1,7 +1,10 @@ package session import ( + "fmt" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/client" "github.com/aws/aws-sdk-go/aws/corehandlers" "github.com/aws/aws-sdk-go/aws/credentials" @@ -310,14 +313,34 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share sharedCfg.Creds, ) } else { - // Fallback to default credentials provider - cfg.Credentials = credentials.NewCredentials( - defaults.RemoteCredProvider(*cfg, handlers), - ) + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. 
+ cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) } } } +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + func initHandlers(s *Session) { // Add the Validate parameter handler if it is not disabled. s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) @@ -349,8 +372,12 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session { func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { s = s.Copy(cfgs...) endpoint, signingRegion := endpoints.NormalizeEndpoint( - aws.StringValue(s.Config.Endpoint), serviceName, - aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL)) + aws.StringValue(s.Config.Endpoint), + serviceName, + aws.StringValue(s.Config.Region), + aws.BoolValue(s.Config.DisableSSL), + aws.BoolValue(s.Config.UseDualStack), + ) return client.Config{ Config: s.Config, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b03a27624..1fa57165f 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.3.1" +const SDKVersion = "1.4.1" diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go index 2b279e659..b4ad7405c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -14,9 +14,9 @@ import ( // normalized endpoint and signing region. If the endpoint is not an empty string // the service name and region will be used to look up the service's API endpoint. // If the endpoint is provided the scheme will be added if it is not present. -func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) { +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) { if endpoint == "" { - return EndpointForRegion(serviceName, region, disableSSL) + return EndpointForRegion(serviceName, region, disableSSL, useDualStack) } return AddScheme(endpoint, disableSSL), "" @@ -24,12 +24,17 @@ func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (n // EndpointForRegion returns an endpoint and its signing region for a service and region. // if the service and region pair are not found endpoint and signingRegion will be empty. 
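The credential fallback rewritten above (the credProviderError placeholders feeding the ChainProvider) can be exercised directly. A minimal sketch, assuming no credentials are configured in the environment or shared config:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // With verbose chain errors enabled, the placeholder providers added
        // in mergeConfigSrcs make the final error explain why the environment
        // and shared-config lookups produced nothing.
        sess, err := session.NewSession(&aws.Config{
            CredentialsChainVerboseErrors: aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
        if _, err := sess.Config.Credentials.Get(); err != nil {
            log.Println("credential lookup failed:", err)
        }
    }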
-func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) { +func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) { + dualStackField := "" + if useDualStack { + dualStackField = "/dualstack" + } + derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", + region + "/" + svcName + dualStackField, + region + "/*" + dualStackField, + "*/" + svcName + dualStackField, + "*/*" + dualStackField, } for _, key := range derivedKeys { diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json index 5f4991c2b..c5bf3c7c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -65,6 +65,9 @@ "*/s3": { "endpoint": "s3-{region}.amazonaws.com" }, + "*/s3/dualstack": { + "endpoint": "s3.dualstack.{region}.amazonaws.com" + }, "us-east-1/s3": { "endpoint": "s3.amazonaws.com" }, diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go index e995315ab..a81d158c3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -48,6 +48,9 @@ var endpointsMap = endpointStruct{ "*/s3": { Endpoint: "s3-{region}.amazonaws.com", }, + "*/s3/dualstack": { + Endpoint: "s3.dualstack.{region}.amazonaws.com", + }, "*/sts": { Endpoint: "sts.amazonaws.com", SigningRegion: "us-east-1", diff --git a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go index 04caf266b..3de964fe3 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/autoscaling/api.go @@ -63,15 +63,75 @@ func (c *AutoScaling) AttachInstancesRequest(input *AttachInstancesInput) (req * // being attached plus the desired capacity of the group exceeds the maximum // size of the group, the operation fails. // +// If there is a Classic load balancer attached to your Auto Scaling group, +// the instances are also registered with the load balancer. If there are target +// groups attached to your Auto Scaling group, the instances are also registered +// with the target groups. +// // For more information, see Attach EC2 Instances to Your Auto Scaling Group // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-instance-asg.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) AttachInstances(input *AttachInstancesInput) (*AttachInstancesOutput, error) { req, out := c.AttachInstancesRequest(input) err := req.Send() return out, err } +const opAttachLoadBalancerTargetGroups = "AttachLoadBalancerTargetGroups" + +// AttachLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the AttachLoadBalancerTargetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
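Stepping back to the endpoint change at the top of this hunk: the dual-stack flag only widens the key search that EndpointForRegion performs. An illustration in plain Go (not the SDK's exported API) of the keys tried in order; for S3 only the */s3/dualstack entry exists in endpoints.json, so the lookup resolves to s3.dualstack.{region}.amazonaws.com:

    package main

    import "fmt"

    // derivedEndpointKeys mirrors the lookup-key construction added to
    // EndpointForRegion above; it is an illustration, not SDK code.
    func derivedEndpointKeys(region, svcName string, useDualStack bool) []string {
        suffix := ""
        if useDualStack {
            suffix = "/dualstack"
        }
        return []string{
            region + "/" + svcName + suffix,
            region + "/*" + suffix,
            "*/" + svcName + suffix,
            "*/*" + suffix,
        }
    }

    func main() {
        // [us-west-2/s3/dualstack us-west-2/*/dualstack */s3/dualstack */*/dualstack]
        fmt.Println(derivedEndpointKeys("us-west-2", "s3", true))
    }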
If +// you just want the service response, call the AttachLoadBalancerTargetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AttachLoadBalancerTargetGroupsRequest method. +// req, resp := client.AttachLoadBalancerTargetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) AttachLoadBalancerTargetGroupsRequest(input *AttachLoadBalancerTargetGroupsInput) (req *request.Request, output *AttachLoadBalancerTargetGroupsOutput) { + op := &request.Operation{ + Name: opAttachLoadBalancerTargetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachLoadBalancerTargetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &AttachLoadBalancerTargetGroupsOutput{} + req.Data = output + return +} + +// Attaches one or more target groups to the specified Auto Scaling group. +// +// To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. +// To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups. +// +// For more information, see Attach a Load Balancer to Your Auto Scaling Group +// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-load-balancer-asg.html) +// in the Auto Scaling User Guide. +func (c *AutoScaling) AttachLoadBalancerTargetGroups(input *AttachLoadBalancerTargetGroupsInput) (*AttachLoadBalancerTargetGroupsOutput, error) { + req, out := c.AttachLoadBalancerTargetGroupsRequest(input) + err := req.Send() + return out, err +} + const opAttachLoadBalancers = "AttachLoadBalancers" // AttachLoadBalancersRequest generates a "aws/request.Request" representing the @@ -113,14 +173,17 @@ func (c *AutoScaling) AttachLoadBalancersRequest(input *AttachLoadBalancersInput return } -// Attaches one or more load balancers to the specified Auto Scaling group. +// Attaches one or more Classic load balancers to the specified Auto Scaling +// group. +// +// To attach an Application load balancer instead, see AttachLoadBalancerTargetGroups. // // To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. // To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers. // // For more information, see Attach a Load Balancer to Your Auto Scaling Group // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/attach-load-balancer-asg.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) AttachLoadBalancers(input *AttachLoadBalancersInput) (*AttachLoadBalancersOutput, error) { req, out := c.AttachLoadBalancersRequest(input) err := req.Send() @@ -174,17 +237,25 @@ func (c *AutoScaling) CompleteLifecycleActionRequest(input *CompleteLifecycleAct // This step is a part of the procedure for adding a lifecycle hook to an Auto // Scaling group: // -// (Optional) Create a Lambda function and a rule that allows CloudWatch Events -// to invoke your Lambda function when Auto Scaling launches or terminates instances. -// (Optional) Create a notification target and an IAM role. The target can be -// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling -// to publish lifecycle notifications to the target. Create the lifecycle hook. -// Specify whether the hook is used when the instances launch or terminate. 
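Circling back to the new AttachLoadBalancerTargetGroups operation added above, a hedged usage sketch; the group name and target group ARN are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
        if err != nil {
            log.Fatal(err)
        }
        svc := autoscaling.New(sess)

        // Both fields are required; the name and ARN below are placeholders.
        _, err = svc.AttachLoadBalancerTargetGroups(&autoscaling.AttachLoadBalancerTargetGroupsInput{
            AutoScalingGroupName: aws.String("my-asg"),
            TargetGroupARNs: []*string{
                aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-tg/0123456789abcdef"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }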
-// If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state. If you finish before the timeout period ends, -// complete the lifecycle action. For more information, see Auto Scaling Lifecycle -// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// (Optional) Create a Lambda function and a rule that allows CloudWatch +// Events to invoke your Lambda function when Auto Scaling launches or terminates +// instances. +// +// (Optional) Create a notification target and an IAM role. The target can +// be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto +// Scaling to publish lifecycle notifications to the target. +// +// Create the lifecycle hook. Specify whether the hook is used when the instances +// launch or terminate. +// +// If you need more time, record the lifecycle action heartbeat to keep the +// instance in a pending state. +// +// If you finish before the timeout period ends, complete the lifecycle +// action. +// +// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) +// in the Auto Scaling User Guide. func (c *AutoScaling) CompleteLifecycleAction(input *CompleteLifecycleActionInput) (*CompleteLifecycleActionOutput, error) { req, out := c.CompleteLifecycleActionRequest(input) err := req.Send() @@ -241,7 +312,7 @@ func (c *AutoScaling) CreateAutoScalingGroupRequest(input *CreateAutoScalingGrou // this limit, see DescribeAccountLimits. // // For more information, see Auto Scaling Groups (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroup.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) CreateAutoScalingGroup(input *CreateAutoScalingGroupInput) (*CreateAutoScalingGroupOutput, error) { req, out := c.CreateAutoScalingGroupRequest(input) err := req.Send() @@ -298,7 +369,7 @@ func (c *AutoScaling) CreateLaunchConfigurationRequest(input *CreateLaunchConfig // this limit, see DescribeAccountLimits. // // For more information, see Launch Configurations (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/LaunchConfiguration.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) CreateLaunchConfiguration(input *CreateLaunchConfigurationInput) (*CreateLaunchConfigurationOutput, error) { req, out := c.CreateLaunchConfigurationRequest(input) err := req.Send() @@ -354,7 +425,7 @@ func (c *AutoScaling) CreateOrUpdateTagsRequest(input *CreateOrUpdateTagsInput) // the previous tag definition, and you do not get an error message. // // For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) CreateOrUpdateTags(input *CreateOrUpdateTagsInput) (*CreateOrUpdateTagsOutput, error) { req, out := c.CreateOrUpdateTagsRequest(input) err := req.Send() @@ -881,8 +952,7 @@ func (c *AutoScaling) DescribeAutoScalingGroupsRequest(input *DescribeAutoScalin return } -// Describes one or more Auto Scaling groups. If a list of names is not provided, -// the call describes all Auto Scaling groups. +// Describes one or more Auto Scaling groups. 
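The trimmed DescribeAutoScalingGroups doc above moves the "describe all groups" behavior into the input-struct comment later in this diff; a minimal paging sketch that omits the group names and follows NextToken:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
        if err != nil {
            log.Fatal(err)
        }
        svc := autoscaling.New(sess)

        // Omitting AutoScalingGroupNames describes every group; keep calling
        // until the service stops returning a NextToken.
        input := &autoscaling.DescribeAutoScalingGroupsInput{MaxRecords: aws.Int64(50)}
        for {
            out, err := svc.DescribeAutoScalingGroups(input)
            if err != nil {
                log.Fatal(err)
            }
            for _, g := range out.AutoScalingGroups {
                fmt.Println(aws.StringValue(g.AutoScalingGroupName))
            }
            if out.NextToken == nil || aws.StringValue(out.NextToken) == "" {
                break
            }
            input.NextToken = out.NextToken
        }
    }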
func (c *AutoScaling) DescribeAutoScalingGroups(input *DescribeAutoScalingGroupsInput) (*DescribeAutoScalingGroupsOutput, error) { req, out := c.DescribeAutoScalingGroupsRequest(input) err := req.Send() @@ -961,8 +1031,7 @@ func (c *AutoScaling) DescribeAutoScalingInstancesRequest(input *DescribeAutoSca return } -// Describes one or more Auto Scaling instances. If a list is not provided, -// the call describes all instances. +// Describes one or more Auto Scaling instances. func (c *AutoScaling) DescribeAutoScalingInstances(input *DescribeAutoScalingInstancesInput) (*DescribeAutoScalingInstancesOutput, error) { req, out := c.DescribeAutoScalingInstancesRequest(input) err := req.Send() @@ -1089,8 +1158,7 @@ func (c *AutoScaling) DescribeLaunchConfigurationsRequest(input *DescribeLaunchC return } -// Describes one or more launch configurations. If you omit the list of names, -// then the call describes all launch configurations. +// Describes one or more launch configurations. func (c *AutoScaling) DescribeLaunchConfigurations(input *DescribeLaunchConfigurationsInput) (*DescribeLaunchConfigurationsOutput, error) { req, out := c.DescribeLaunchConfigurationsRequest(input) err := req.Send() @@ -1218,6 +1286,54 @@ func (c *AutoScaling) DescribeLifecycleHooks(input *DescribeLifecycleHooksInput) return out, err } +const opDescribeLoadBalancerTargetGroups = "DescribeLoadBalancerTargetGroups" + +// DescribeLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DescribeLoadBalancerTargetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DescribeLoadBalancerTargetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DescribeLoadBalancerTargetGroupsRequest method. +// req, resp := client.DescribeLoadBalancerTargetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DescribeLoadBalancerTargetGroupsRequest(input *DescribeLoadBalancerTargetGroupsInput) (req *request.Request, output *DescribeLoadBalancerTargetGroupsOutput) { + op := &request.Operation{ + Name: opDescribeLoadBalancerTargetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeLoadBalancerTargetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeLoadBalancerTargetGroupsOutput{} + req.Data = output + return +} + +// Describes the target groups for the specified Auto Scaling group. 
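A hedged sketch of the paired DescribeLoadBalancerTargetGroups call documented just above (placeholder group name; it assumes LoadBalancerTargetGroupState exposes the target group ARN and attachment state, as the analogous LoadBalancerState type does for Classic load balancers):

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
        if err != nil {
            log.Fatal(err)
        }
        svc := autoscaling.New(sess)

        // "my-asg" is a placeholder group name.
        out, err := svc.DescribeLoadBalancerTargetGroups(&autoscaling.DescribeLoadBalancerTargetGroupsInput{
            AutoScalingGroupName: aws.String("my-asg"),
        })
        if err != nil {
            log.Fatal(err)
        }
        for _, tg := range out.LoadBalancerTargetGroups {
            fmt.Println(aws.StringValue(tg.LoadBalancerTargetGroupARN), aws.StringValue(tg.State))
        }
    }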
+func (c *AutoScaling) DescribeLoadBalancerTargetGroups(input *DescribeLoadBalancerTargetGroupsInput) (*DescribeLoadBalancerTargetGroupsOutput, error) { + req, out := c.DescribeLoadBalancerTargetGroupsRequest(input) + err := req.Send() + return out, err +} + const opDescribeLoadBalancers = "DescribeLoadBalancers" // DescribeLoadBalancersRequest generates a "aws/request.Request" representing the @@ -1260,6 +1376,9 @@ func (c *AutoScaling) DescribeLoadBalancersRequest(input *DescribeLoadBalancersI } // Describes the load balancers for the specified Auto Scaling group. +// +// Note that this operation describes only Classic load balancers. If you have +// Application load balancers, use DescribeLoadBalancerTargetGroups instead. func (c *AutoScaling) DescribeLoadBalancers(input *DescribeLoadBalancersInput) (*DescribeLoadBalancersOutput, error) { req, out := c.DescribeLoadBalancersRequest(input) err := req.Send() @@ -1524,9 +1643,6 @@ func (c *AutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingAct } // Describes one or more scaling activities for the specified Auto Scaling group. -// If you omit the ActivityIds, the call returns all activities from the past -// six weeks. Activities are sorted by the start time. Activities still in progress -// appear first on the list. func (c *AutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) { req, out := c.DescribeScalingActivitiesRequest(input) err := req.Send() @@ -1871,15 +1987,68 @@ func (c *AutoScaling) DetachInstancesRequest(input *DetachInstancesInput) (req * // If you do not specify the option to decrement the desired capacity, Auto // Scaling launches instances to replace the ones that are detached. // +// If there is a Classic load balancer attached to the Auto Scaling group, +// the instances are deregistered from the load balancer. If there are target +// groups attached to the Auto Scaling group, the instances are deregistered +// from the target groups. +// // For more information, see Detach EC2 Instances from Your Auto Scaling Group // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/detach-instance-asg.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) DetachInstances(input *DetachInstancesInput) (*DetachInstancesOutput, error) { req, out := c.DetachInstancesRequest(input) err := req.Send() return out, err } +const opDetachLoadBalancerTargetGroups = "DetachLoadBalancerTargetGroups" + +// DetachLoadBalancerTargetGroupsRequest generates a "aws/request.Request" representing the +// client's request for the DetachLoadBalancerTargetGroups operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DetachLoadBalancerTargetGroups method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DetachLoadBalancerTargetGroupsRequest method. 
+// req, resp := client.DetachLoadBalancerTargetGroupsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *AutoScaling) DetachLoadBalancerTargetGroupsRequest(input *DetachLoadBalancerTargetGroupsInput) (req *request.Request, output *DetachLoadBalancerTargetGroupsOutput) { + op := &request.Operation{ + Name: opDetachLoadBalancerTargetGroups, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachLoadBalancerTargetGroupsInput{} + } + + req = c.newRequest(op, input, output) + output = &DetachLoadBalancerTargetGroupsOutput{} + req.Data = output + return +} + +// Detaches one or more target groups from the specified Auto Scaling group. +func (c *AutoScaling) DetachLoadBalancerTargetGroups(input *DetachLoadBalancerTargetGroupsInput) (*DetachLoadBalancerTargetGroupsOutput, error) { + req, out := c.DetachLoadBalancerTargetGroupsRequest(input) + err := req.Send() + return out, err +} + const opDetachLoadBalancers = "DetachLoadBalancers" // DetachLoadBalancersRequest generates a "aws/request.Request" representing the @@ -1921,7 +2090,11 @@ func (c *AutoScaling) DetachLoadBalancersRequest(input *DetachLoadBalancersInput return } -// Removes one or more load balancers from the specified Auto Scaling group. +// Detaches one or more Classic load balancers from the specified Auto Scaling +// group. +// +// Note that this operation detaches only Classic load balancers. If you have +// Application load balancers, use DetachLoadBalancerTargetGroups instead. // // When you detach a load balancer, it enters the Removing state while deregistering // the instances in the group. When all instances are deregistered, then you @@ -2082,7 +2255,7 @@ func (c *AutoScaling) EnterStandbyRequest(input *EnterStandbyInput) (req *reques // Moves the specified instances into Standby mode. // // For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) EnterStandby(input *EnterStandbyInput) (*EnterStandbyOutput, error) { req, out := c.EnterStandbyRequest(input) err := req.Send() @@ -2183,7 +2356,7 @@ func (c *AutoScaling) ExitStandbyRequest(input *ExitStandbyInput) (req *request. // Moves the specified instances out of Standby mode. // // For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) ExitStandby(input *ExitStandbyInput) (*ExitStandbyOutput, error) { req, out := c.ExitStandbyRequest(input) err := req.Send() @@ -2240,21 +2413,28 @@ func (c *AutoScaling) PutLifecycleHookRequest(input *PutLifecycleHookInput) (req // This step is a part of the procedure for adding a lifecycle hook to an Auto // Scaling group: // -// (Optional) Create a Lambda function and a rule that allows CloudWatch Events -// to invoke your Lambda function when Auto Scaling launches or terminates instances. -// (Optional) Create a notification target and an IAM role. The target can be -// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling -// to publish lifecycle notifications to the target. Create the lifecycle hook. -// Specify whether the hook is used when the instances launch or terminate. 
-// If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state. If you finish before the timeout period ends, -// complete the lifecycle action. For more information, see Auto Scaling Lifecycle -// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// (Optional) Create a Lambda function and a rule that allows CloudWatch +// Events to invoke your Lambda function when Auto Scaling launches or terminates +// instances. +// +// (Optional) Create a notification target and an IAM role. The target can +// be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto +// Scaling to publish lifecycle notifications to the target. +// +// Create the lifecycle hook. Specify whether the hook is used when the +// instances launch or terminate. +// +// If you need more time, record the lifecycle action heartbeat to keep the +// instance in a pending state. +// +// If you finish before the timeout period ends, complete the lifecycle action. +// +// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) +// in the Auto Scaling User Guide. // // If you exceed your maximum limit of lifecycle hooks, which by default is -// 50 per region, the call fails. For information about updating this limit, -// see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) +// 50 per Auto Scaling group, the call fails. For information about updating +// this limit, see AWS Service Limits (http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) // in the Amazon Web Services General Reference. func (c *AutoScaling) PutLifecycleHook(input *PutLifecycleHookInput) (*PutLifecycleHookOutput, error) { req, out := c.PutLifecycleHookRequest(input) @@ -2306,14 +2486,14 @@ func (c *AutoScaling) PutNotificationConfigurationRequest(input *PutNotification } // Configures an Auto Scaling group to send notifications when specified events -// take place. Subscribers to this topic can have messages for events delivered -// to an endpoint such as a web server or email address. +// take place. Subscribers to the specified topic can have messages delivered +// to an endpoint such as a web server or an email address. // -// For more information see Getting Notifications When Your Auto Scaling Group -// Changes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASGettingNotifications.html) -// in the Auto Scaling Developer Guide. +// This configuration overwrites any existing configuration. // -// This configuration overwrites an existing configuration. +// For more information see Getting SNS Notifications When Your Auto Scaling +// Group Scales (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASGettingNotifications.html) +// in the Auto Scaling User Guide. func (c *AutoScaling) PutNotificationConfiguration(input *PutNotificationConfigurationInput) (*PutNotificationConfigurationOutput, error) { req, out := c.PutNotificationConfigurationRequest(input) err := req.Send() @@ -2424,7 +2604,7 @@ func (c *AutoScaling) PutScheduledUpdateGroupActionRequest(input *PutScheduledUp // the corresponding value remains unchanged in the affected Auto Scaling group. // // For more information, see Scheduled Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/schedule_time.html) -// in the Auto Scaling Developer Guide. 
+// in the Auto Scaling User Guide. func (c *AutoScaling) PutScheduledUpdateGroupAction(input *PutScheduledUpdateGroupActionInput) (*PutScheduledUpdateGroupActionOutput, error) { req, out := c.PutScheduledUpdateGroupActionRequest(input) err := req.Send() @@ -2479,17 +2659,24 @@ func (c *AutoScaling) RecordLifecycleActionHeartbeatRequest(input *RecordLifecyc // This step is a part of the procedure for adding a lifecycle hook to an Auto // Scaling group: // -// (Optional) Create a Lambda function and a rule that allows CloudWatch Events -// to invoke your Lambda function when Auto Scaling launches or terminates instances. -// (Optional) Create a notification target and an IAM role. The target can be -// either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto Scaling -// to publish lifecycle notifications to the target. Create the lifecycle hook. -// Specify whether the hook is used when the instances launch or terminate. -// If you need more time, record the lifecycle action heartbeat to keep the -// instance in a pending state. If you finish before the timeout period ends, -// complete the lifecycle action. For more information, see Auto Scaling Lifecycle -// (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// (Optional) Create a Lambda function and a rule that allows CloudWatch +// Events to invoke your Lambda function when Auto Scaling launches or terminates +// instances. +// +// (Optional) Create a notification target and an IAM role. The target can +// be either an Amazon SQS queue or an Amazon SNS topic. The role allows Auto +// Scaling to publish lifecycle notifications to the target. +// +// Create the lifecycle hook. Specify whether the hook is used when the instances +// launch or terminate. +// +// If you need more time, record the lifecycle action heartbeat to keep +// the instance in a pending state. +// +// If you finish before the timeout period ends, complete the lifecycle action. +// +// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) +// in the Auto Scaling User Guide. func (c *AutoScaling) RecordLifecycleActionHeartbeat(input *RecordLifecycleActionHeartbeatInput) (*RecordLifecycleActionHeartbeatOutput, error) { req, out := c.RecordLifecycleActionHeartbeatRequest(input) err := req.Send() @@ -2544,7 +2731,7 @@ func (c *AutoScaling) ResumeProcessesRequest(input *ScalingProcessQuery) (req *r // // For more information, see Suspending and Resuming Auto Scaling Processes // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) ResumeProcesses(input *ScalingProcessQuery) (*ResumeProcessesOutput, error) { req, out := c.ResumeProcessesRequest(input) err := req.Send() @@ -2597,7 +2784,7 @@ func (c *AutoScaling) SetDesiredCapacityRequest(input *SetDesiredCapacityInput) // Sets the size of the specified Auto Scaling group. // // For more information about desired capacity, see What Is Auto Scaling? (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/WhatIsAutoScaling.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. 
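Following the SetDesiredCapacity doc just above, a minimal sketch with a placeholder group name; HonorCooldown makes the call respect any in-progress cooldown period:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
        if err != nil {
            log.Fatal(err)
        }
        svc := autoscaling.New(sess)

        _, err = svc.SetDesiredCapacity(&autoscaling.SetDesiredCapacityInput{
            AutoScalingGroupName: aws.String("my-asg"), // placeholder
            DesiredCapacity:      aws.Int64(4),
            HonorCooldown:        aws.Bool(true),
        })
        if err != nil {
            log.Fatal(err)
        }
    }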
func (c *AutoScaling) SetDesiredCapacity(input *SetDesiredCapacityInput) (*SetDesiredCapacityOutput, error) { req, out := c.SetDesiredCapacityRequest(input) err := req.Send() @@ -2650,7 +2837,7 @@ func (c *AutoScaling) SetInstanceHealthRequest(input *SetInstanceHealthInput) (r // Sets the health status of the specified instance. // // For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) SetInstanceHealth(input *SetInstanceHealthInput) (*SetInstanceHealthOutput, error) { req, out := c.SetInstanceHealthRequest(input) err := req.Send() @@ -2701,7 +2888,7 @@ func (c *AutoScaling) SetInstanceProtectionRequest(input *SetInstanceProtectionI // Updates the instance protection settings of the specified instances. // // For more information, see Instance Protection (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html#instance-protection) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) SetInstanceProtection(input *SetInstanceProtectionInput) (*SetInstanceProtectionOutput, error) { req, out := c.SetInstanceProtectionRequest(input) err := req.Send() @@ -2761,7 +2948,7 @@ func (c *AutoScaling) SuspendProcessesRequest(input *ScalingProcessQuery) (req * // // For more information, see Suspending and Resuming Auto Scaling Processes // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. func (c *AutoScaling) SuspendProcesses(input *ScalingProcessQuery) (*SuspendProcessesOutput, error) { req, out := c.SuspendProcessesRequest(input) err := req.Send() @@ -2943,7 +3130,7 @@ func (s Activity) GoString() string { // Describes a policy adjustment type. // // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. type AdjustmentType struct { _ struct{} `type:"structure"` @@ -2983,6 +3170,7 @@ func (s Alarm) GoString() string { return s.String() } +// Contains the parameters for AttachInstances. type AttachInstancesInput struct { _ struct{} `type:"structure"` @@ -3033,14 +3221,69 @@ func (s AttachInstancesOutput) GoString() string { return s.String() } +// Contains the parameters for AttachLoadBalancerTargetGroups. +type AttachLoadBalancerTargetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Names (ARN) of the target groups. + TargetGroupARNs []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s AttachLoadBalancerTargetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerTargetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AttachLoadBalancerTargetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancerTargetGroupsInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.TargetGroupARNs == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupARNs")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type AttachLoadBalancerTargetGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachLoadBalancerTargetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AttachLoadBalancerTargetGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for AttachLoadBalancers. type AttachLoadBalancersInput struct { _ struct{} `type:"structure"` // The name of the group. - AutoScalingGroupName *string `min:"1" type:"string"` + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` // One or more load balancer names. - LoadBalancerNames []*string `type:"list"` + LoadBalancerNames []*string `type:"list" required:"true"` } // String returns the string representation @@ -3056,9 +3299,15 @@ func (s AttachLoadBalancersInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *AttachLoadBalancersInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "AttachLoadBalancersInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } + if s.LoadBalancerNames == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerNames")) + } if invalidParams.Len() > 0 { return invalidParams @@ -3066,6 +3315,7 @@ func (s *AttachLoadBalancersInput) Validate() error { return nil } +// Contains the output of AttachLoadBalancers. type AttachLoadBalancersOutput struct { _ struct{} `type:"structure"` } @@ -3135,6 +3385,7 @@ func (s *BlockDeviceMapping) Validate() error { return nil } +// Contains the parameters for CompleteLifecycleAction. type CompleteLifecycleActionInput struct { _ struct{} `type:"structure"` @@ -3198,6 +3449,7 @@ func (s *CompleteLifecycleActionInput) Validate() error { return nil } +// Contains the output of CompleteLifecycleAction. type CompleteLifecycleActionOutput struct { _ struct{} `type:"structure"` } @@ -3212,6 +3464,7 @@ func (s CompleteLifecycleActionOutput) GoString() string { return s.String() } +// Contains the parameters for CreateAutoScalingGroup. type CreateAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -3227,7 +3480,7 @@ type CreateAutoScalingGroupInput struct { // another scaling activity can start. The default is 300. // // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` // The number of EC2 instances that should be running in the group. 
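AttachLoadBalancersInput above now marks both of its fields required, and the generated Validate method (same pattern as the one just shown) catches the omission client-side before any request is signed. A sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        // LoadBalancerNames is intentionally omitted; it is now required.
        in := &autoscaling.AttachLoadBalancersInput{
            AutoScalingGroupName: aws.String("my-asg"), // placeholder name
        }
        if err := in.Validate(); err != nil {
            fmt.Println(err) // reports the missing LoadBalancerNames parameter
        }
    }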
This number @@ -3238,19 +3491,19 @@ type CreateAutoScalingGroupInput struct { // The amount of time, in seconds, that Auto Scaling waits before checking the // health status of an EC2 instance that has come into service. During this // time, any health check failures for the instance are ignored. The default - // is 300. + // is 0. // // This parameter is required if you are adding an ELB health check. // // For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. // // By default, health checks use Amazon EC2 instance status checks to determine // the health of an instance. For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. HealthCheckType *string `min:"1" type:"string"` // The ID of the instance used to create a launch configuration for the group. @@ -3263,18 +3516,19 @@ type CreateAutoScalingGroupInput struct { // // For more information, see Create an Auto Scaling Group Using an EC2 Instance // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-asg-from-instance.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. InstanceId *string `min:"1" type:"string"` // The name of the launch configuration. Alternatively, specify an EC2 instance // instead of a launch configuration. LaunchConfigurationName *string `min:"1" type:"string"` - // One or more load balancers. + // One or more Classic load balancers. To specify an Application load balancer, + // use TargetGroupARNs instead. // // For more information, see Using a Load Balancer With an Auto Scaling Group // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SetUpASLBApp.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. LoadBalancerNames []*string `type:"list"` // The maximum size of the group. @@ -3295,15 +3549,18 @@ type CreateAutoScalingGroupInput struct { // One or more tags. // // For more information, see Tagging Auto Scaling Groups and Instances (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/ASTagging.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. Tags []*Tag `type:"list"` + // The Amazon Resource Names (ARN) of the target groups. + TargetGroupARNs []*string `type:"list"` + // One or more termination policies used to select the instance to terminate. // These policies are executed in the order that they are listed. // // For more information, see Controlling Which Instances Auto Scaling Terminates // During Scale In (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. TerminationPolicies []*string `type:"list"` // A comma-separated list of subnet identifiers for your virtual private cloud @@ -3313,7 +3570,7 @@ type CreateAutoScalingGroupInput struct { // the subnets' Availability Zones match the Availability Zones specified. // // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) - // in the Auto Scaling Developer Guide. 
+ // in the Auto Scaling User Guide. VPCZoneIdentifier *string `min:"1" type:"string"` } @@ -3391,13 +3648,14 @@ func (s CreateAutoScalingGroupOutput) GoString() string { return s.String() } +// Contains the parameters for CreateLaunchConfiguration. type CreateLaunchConfigurationInput struct { _ struct{} `type:"structure"` // Used for groups that launch instances into a virtual private cloud (VPC). // Specifies whether to assign a public IP address to each instance. For more // information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. // // If you specify this parameter, be sure to specify at least one subnet when // you create your group. @@ -3442,7 +3700,7 @@ type CreateLaunchConfigurationInput struct { // enable applications running on your EC2 instances to securely access other // AWS resources. For more information, see Launch Auto Scaling Instances with // an IAM Role (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/us-iam-role.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. IamInstanceProfile *string `min:"1" type:"string"` // The ID of the Amazon Machine Image (AMI) to use to launch your EC2 instances. @@ -3460,7 +3718,7 @@ type CreateLaunchConfigurationInput struct { // // For more information, see Create a Launch Configuration Using an EC2 Instance // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/create-lc-with-instanceID.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. InstanceId *string `min:"1" type:"string"` // Enables detailed monitoring if it is disabled. Detailed monitoring is enabled @@ -3471,7 +3729,7 @@ type CreateLaunchConfigurationInput struct { // monitoring, by specifying False, CloudWatch generates metrics every 5 minutes. // For more information, see Monitoring Your Auto Scaling Instances and Groups // (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-instance-monitoring.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. InstanceMonitoring *InstanceMonitoring `type:"structure"` // The instance type of the EC2 instance. For information about available instance @@ -3502,7 +3760,7 @@ type CreateLaunchConfigurationInput struct { // you create your group. // // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. // // Valid values: default | dedicated PlacementTenancy *string `min:"1" type:"string"` @@ -3526,7 +3784,7 @@ type CreateLaunchConfigurationInput struct { // the request. Spot Instances are launched when the price you specify exceeds // the current Spot market price. For more information, see Launching Spot Instances // in Your Auto Scaling Group (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US-SpotInstances.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. SpotPrice *string `min:"1" type:"string"` // The user data to make available to the launched EC2 instances. For more information, @@ -3615,6 +3873,7 @@ func (s CreateLaunchConfigurationOutput) GoString() string { return s.String() } +// Contains the parameters for CreateOrUpdateTags. 
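Tying together the new TargetGroupARNs field of CreateAutoScalingGroupInput added earlier in this hunk: a hedged sketch that creates a group registered directly with an Application Load Balancer target group. All names, ARNs, and subnet IDs are placeholders:

    package main

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/autoscaling"
    )

    func main() {
        sess, err := session.NewSession(&aws.Config{Region: aws.String("us-west-2")})
        if err != nil {
            log.Fatal(err)
        }
        svc := autoscaling.New(sess)

        _, err = svc.CreateAutoScalingGroup(&autoscaling.CreateAutoScalingGroupInput{
            AutoScalingGroupName:    aws.String("my-asg"),        // placeholder
            LaunchConfigurationName: aws.String("my-launch-cfg"), // placeholder
            MinSize:                 aws.Int64(1),
            MaxSize:                 aws.Int64(3),
            VPCZoneIdentifier:       aws.String("subnet-11111111,subnet-22222222"), // placeholders
            // Instances launched into the group are registered with this
            // target group (placeholder ARN).
            TargetGroupARNs: []*string{
                aws.String("arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-tg/0123456789abcdef"),
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }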
type CreateOrUpdateTagsInput struct { _ struct{} `type:"structure"` @@ -3669,6 +3928,7 @@ func (s CreateOrUpdateTagsOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteAutoScalingGroup. type DeleteAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -3721,6 +3981,7 @@ func (s DeleteAutoScalingGroupOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteLaunchConfiguration. type DeleteLaunchConfigurationInput struct { _ struct{} `type:"structure"` @@ -3768,6 +4029,7 @@ func (s DeleteLaunchConfigurationOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteLifecycleHook. type DeleteLifecycleHookInput struct { _ struct{} `type:"structure"` @@ -3810,6 +4072,7 @@ func (s *DeleteLifecycleHookInput) Validate() error { return nil } +// Contains the output of DeleteLifecycleHook. type DeleteLifecycleHookOutput struct { _ struct{} `type:"structure"` } @@ -3824,6 +4087,7 @@ func (s DeleteLifecycleHookOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteNotificationConfiguration. type DeleteNotificationConfigurationInput struct { _ struct{} `type:"structure"` @@ -3881,6 +4145,7 @@ func (s DeleteNotificationConfigurationOutput) GoString() string { return s.String() } +// Contains the parameters for DeletePolicy. type DeletePolicyInput struct { _ struct{} `type:"structure"` @@ -3934,11 +4199,12 @@ func (s DeletePolicyOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteScheduledAction. type DeleteScheduledActionInput struct { _ struct{} `type:"structure"` // The name of the Auto Scaling group. - AutoScalingGroupName *string `min:"1" type:"string"` + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` // The name of the action to delete. ScheduledActionName *string `min:"1" type:"string" required:"true"` @@ -3957,6 +4223,9 @@ func (s DeleteScheduledActionInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteScheduledActionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteScheduledActionInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } @@ -3987,6 +4256,7 @@ func (s DeleteScheduledActionOutput) GoString() string { return s.String() } +// Contains the parameters for DeleteTags. type DeleteTagsInput struct { _ struct{} `type:"structure"` @@ -4055,6 +4325,7 @@ func (s DescribeAccountLimitsInput) GoString() string { return s.String() } +// Contains the parameters for DescribeAccountLimits. type DescribeAccountLimitsOutput struct { _ struct{} `type:"structure"` @@ -4097,6 +4368,7 @@ func (s DescribeAdjustmentTypesInput) GoString() string { return s.String() } +// Contains the parameters for DescribeAdjustmentTypes. type DescribeAdjustmentTypesOutput struct { _ struct{} `type:"structure"` @@ -4114,10 +4386,12 @@ func (s DescribeAdjustmentTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeAutoScalingGroups. type DescribeAutoScalingGroupsInput struct { _ struct{} `type:"structure"` - // The group names. + // The group names. If you omit this parameter, all Auto Scaling groups are + // described. 
AutoScalingGroupNames []*string `type:"list"` // The maximum number of items to return with this call. @@ -4138,6 +4412,7 @@ func (s DescribeAutoScalingGroupsInput) GoString() string { return s.String() } +// Contains the output for DescribeAutoScalingGroups. type DescribeAutoScalingGroupsOutput struct { _ struct{} `type:"structure"` @@ -4159,6 +4434,7 @@ func (s DescribeAutoScalingGroupsOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeAutoScalingInstances. type DescribeAutoScalingInstancesInput struct { _ struct{} `type:"structure"` @@ -4185,6 +4461,7 @@ func (s DescribeAutoScalingInstancesInput) GoString() string { return s.String() } +// Contains the output of DescribeAutoScalingInstances. type DescribeAutoScalingInstancesOutput struct { _ struct{} `type:"structure"` @@ -4220,20 +4497,11 @@ func (s DescribeAutoScalingNotificationTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeAutoScalingNotificationTypes. type DescribeAutoScalingNotificationTypesOutput struct { _ struct{} `type:"structure"` - // One or more of the following notification types: - // - // autoscaling:EC2_INSTANCE_LAUNCH - // - // autoscaling:EC2_INSTANCE_LAUNCH_ERROR - // - // autoscaling:EC2_INSTANCE_TERMINATE - // - // autoscaling:EC2_INSTANCE_TERMINATE_ERROR - // - // autoscaling:TEST_NOTIFICATION + // The notification types. AutoScalingNotificationTypes []*string `type:"list"` } @@ -4247,10 +4515,12 @@ func (s DescribeAutoScalingNotificationTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLaunchConfigurations. type DescribeLaunchConfigurationsInput struct { _ struct{} `type:"structure"` - // The launch configuration names. + // The launch configuration names. If you omit this parameter, all launch configurations + // are described. LaunchConfigurationNames []*string `type:"list"` // The maximum number of items to return with this call. The default is 100. @@ -4271,6 +4541,7 @@ func (s DescribeLaunchConfigurationsInput) GoString() string { return s.String() } +// Contains the output of DescribeLaunchConfigurations. type DescribeLaunchConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -4306,14 +4577,11 @@ func (s DescribeLifecycleHookTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeLifecycleHookTypes. type DescribeLifecycleHookTypesOutput struct { _ struct{} `type:"structure"` - // One or more of the following notification types: - // - // autoscaling:EC2_INSTANCE_LAUNCHING - // - // autoscaling:EC2_INSTANCE_TERMINATING + // The lifecycle hook types. LifecycleHookTypes []*string `type:"list"` } @@ -4327,13 +4595,15 @@ func (s DescribeLifecycleHookTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLifecycleHooks. type DescribeLifecycleHooksInput struct { _ struct{} `type:"structure"` // The name of the group. AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // The names of one or more lifecycle hooks. + // The names of one or more lifecycle hooks. If you omit this parameter, all + // lifecycle hooks are described. LifecycleHookNames []*string `type:"list"` } @@ -4363,6 +4633,7 @@ func (s *DescribeLifecycleHooksInput) Validate() error { return nil } +// Contains the output of DescribeLifecycleHooks. 
type DescribeLifecycleHooksOutput struct { _ struct{} `type:"structure"` @@ -4380,6 +4651,70 @@ func (s DescribeLifecycleHooksOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLoadBalancerTargetGroups. +type DescribeLoadBalancerTargetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The maximum number of items to return with this call. + MaxRecords *int64 `type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a previous call.) + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancerTargetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerTargetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeLoadBalancerTargetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeLoadBalancerTargetGroupsInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the output of DescribeLoadBalancerTargetGroups. +type DescribeLoadBalancerTargetGroupsOutput struct { + _ struct{} `type:"structure"` + + // Information about the target groups. + LoadBalancerTargetGroups []*LoadBalancerTargetGroupState `type:"list"` + + // The token to use when requesting the next set of items. If there are no additional + // items to return, the string is empty. + NextToken *string `type:"string"` +} + +// String returns the string representation +func (s DescribeLoadBalancerTargetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLoadBalancerTargetGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeLoadBalancers. type DescribeLoadBalancersInput struct { _ struct{} `type:"structure"` @@ -4420,6 +4755,7 @@ func (s *DescribeLoadBalancersInput) Validate() error { return nil } +// Contains the output of DescribeLoadBalancers. type DescribeLoadBalancersOutput struct { _ struct{} `type:"structure"` @@ -4455,6 +4791,7 @@ func (s DescribeMetricCollectionTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeMetricsCollectionTypes. type DescribeMetricCollectionTypesOutput struct { _ struct{} `type:"structure"` @@ -4475,6 +4812,7 @@ func (s DescribeMetricCollectionTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeNotificationConfigurations. type DescribeNotificationConfigurationsInput struct { _ struct{} `type:"structure"` @@ -4499,6 +4837,7 @@ func (s DescribeNotificationConfigurationsInput) GoString() string { return s.String() } +// Contains the output from DescribeNotificationConfigurations. type DescribeNotificationConfigurationsOutput struct { _ struct{} `type:"structure"` @@ -4520,6 +4859,7 @@ func (s DescribeNotificationConfigurationsOutput) GoString() string { return s.String() } +// Contains the parameters for DescribePolicies. 
type DescribePoliciesInput struct { _ struct{} `type:"structure"` @@ -4534,9 +4874,9 @@ type DescribePoliciesInput struct { NextToken *string `type:"string"` // One or more policy names or policy ARNs to be described. If you omit this - // list, all policy names are described. If an group name is provided, the results - // are limited to that group. This list is limited to 50 items. If you specify - // an unknown policy name, it is ignored with no error. + // parameter, all policy names are described. If an group name is provided, + // the results are limited to that group. This list is limited to 50 items. + // If you specify an unknown policy name, it is ignored with no error. PolicyNames []*string `type:"list"` // One or more policy types. Valid values are SimpleScaling and StepScaling. @@ -4566,6 +4906,7 @@ func (s *DescribePoliciesInput) Validate() error { return nil } +// Contains the output of DescribePolicies. type DescribePoliciesOutput struct { _ struct{} `type:"structure"` @@ -4587,14 +4928,15 @@ func (s DescribePoliciesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeScalingActivities. type DescribeScalingActivitiesInput struct { _ struct{} `type:"structure"` - // The activity IDs of the desired scaling activities. If this list is omitted, - // all activities are described. If you specify an Auto Scaling group, the results - // are limited to that group. The list of requested activities cannot contain - // more than 50 items. If unknown activities are requested, they are ignored - // with no error. + // The activity IDs of the desired scaling activities. If you omit this parameter, + // all activities for the past six weeks are described. If you specify an Auto + // Scaling group, the results are limited to that group. The list of requested + // activities cannot contain more than 50 items. If unknown activities are requested, + // they are ignored with no error. ActivityIds []*string `type:"list"` // The name of the group. @@ -4631,10 +4973,12 @@ func (s *DescribeScalingActivitiesInput) Validate() error { return nil } +// Contains the output of DescribeScalingActivities. type DescribeScalingActivitiesOutput struct { _ struct{} `type:"structure"` - // The scaling activities. + // The scaling activities. Activities are sorted by start time. Activities still + // in progress are described first. Activities []*Activity `type:"list" required:"true"` // The token to use when requesting the next set of items. If there are no additional @@ -4666,6 +5010,7 @@ func (s DescribeScalingProcessTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeScalingProcessTypes. type DescribeScalingProcessTypesOutput struct { _ struct{} `type:"structure"` @@ -4683,6 +5028,7 @@ func (s DescribeScalingProcessTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeScheduledActions. type DescribeScheduledActionsInput struct { _ struct{} `type:"structure"` @@ -4700,8 +5046,8 @@ type DescribeScheduledActionsInput struct { // a previous call.) NextToken *string `type:"string"` - // Describes one or more scheduled actions. If you omit this list, the call - // describes all scheduled actions. If you specify an unknown scheduled action + // Describes one or more scheduled actions. If you omit this parameter, all + // scheduled actions are described. If you specify an unknown scheduled action, // it is ignored with no error. // // You can describe up to a maximum of 50 instances with a single call. 
If @@ -4737,6 +5083,7 @@ func (s *DescribeScheduledActionsInput) Validate() error { return nil } +// Contains the output of DescribeScheduledActions. type DescribeScheduledActionsOutput struct { _ struct{} `type:"structure"` @@ -4758,6 +5105,7 @@ func (s DescribeScheduledActionsOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeTags. type DescribeTagsInput struct { _ struct{} `type:"structure"` @@ -4782,6 +5130,7 @@ func (s DescribeTagsInput) GoString() string { return s.String() } +// Contains the output of DescribeTags. type DescribeTagsOutput struct { _ struct{} `type:"structure"` @@ -4817,6 +5166,7 @@ func (s DescribeTerminationPolicyTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeTerminationPolicyTypes. type DescribeTerminationPolicyTypesOutput struct { _ struct{} `type:"structure"` @@ -4835,6 +5185,7 @@ func (s DescribeTerminationPolicyTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DetachInstances. type DetachInstancesInput struct { _ struct{} `type:"structure"` @@ -4878,6 +5229,7 @@ func (s *DetachInstancesInput) Validate() error { return nil } +// Contains the output of DetachInstances. type DetachInstancesOutput struct { _ struct{} `type:"structure"` @@ -4895,14 +5247,68 @@ func (s DetachInstancesOutput) GoString() string { return s.String() } +type DetachLoadBalancerTargetGroupsInput struct { + _ struct{} `type:"structure"` + + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Names (ARN) of the target groups. + TargetGroupARNs []*string `type:"list" required:"true"` +} + +// String returns the string representation +func (s DetachLoadBalancerTargetGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerTargetGroupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachLoadBalancerTargetGroupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DetachLoadBalancerTargetGroupsInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } + if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { + invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) + } + if s.TargetGroupARNs == nil { + invalidParams.Add(request.NewErrParamRequired("TargetGroupARNs")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DetachLoadBalancerTargetGroupsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachLoadBalancerTargetGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DetachLoadBalancerTargetGroupsOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DetachLoadBalancers. type DetachLoadBalancersInput struct { _ struct{} `type:"structure"` - // The name of the group. - AutoScalingGroupName *string `min:"1" type:"string"` + // The name of the Auto Scaling group. + AutoScalingGroupName *string `min:"1" type:"string" required:"true"` // One or more load balancer names. 
- LoadBalancerNames []*string `type:"list"` + LoadBalancerNames []*string `type:"list" required:"true"` } // String returns the string representation @@ -4918,9 +5324,15 @@ func (s DetachLoadBalancersInput) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *DetachLoadBalancersInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DetachLoadBalancersInput"} + if s.AutoScalingGroupName == nil { + invalidParams.Add(request.NewErrParamRequired("AutoScalingGroupName")) + } if s.AutoScalingGroupName != nil && len(*s.AutoScalingGroupName) < 1 { invalidParams.Add(request.NewErrParamMinLen("AutoScalingGroupName", 1)) } + if s.LoadBalancerNames == nil { + invalidParams.Add(request.NewErrParamRequired("LoadBalancerNames")) + } if invalidParams.Len() > 0 { return invalidParams @@ -4928,6 +5340,7 @@ func (s *DetachLoadBalancersInput) Validate() error { return nil } +// Contains the output for DetachLoadBalancers. type DetachLoadBalancersOutput struct { _ struct{} `type:"structure"` } @@ -4942,6 +5355,7 @@ func (s DetachLoadBalancersOutput) GoString() string { return s.String() } +// Contains the parameters for DisableMetricsCollection. type DisableMetricsCollectionInput struct { _ struct{} `type:"structure"` @@ -4951,21 +5365,21 @@ type DisableMetricsCollectionInput struct { // One or more of the following metrics. If you omit this parameter, all metrics // are disabled. // - // GroupMinSize + // GroupMinSize // - // GroupMaxSize + // GroupMaxSize // - // GroupDesiredCapacity + // GroupDesiredCapacity // - // GroupInServiceInstances + // GroupInServiceInstances // - // GroupPendingInstances + // GroupPendingInstances // - // GroupStandbyInstances + // GroupStandbyInstances // - // GroupTerminatingInstances + // GroupTerminatingInstances // - // GroupTotalInstances + // GroupTotalInstances Metrics []*string `type:"list"` } @@ -5085,6 +5499,7 @@ func (s *Ebs) Validate() error { return nil } +// Contains the parameters for EnableMetricsCollection. type EnableMetricsCollectionInput struct { _ struct{} `type:"structure"` @@ -5098,24 +5513,24 @@ type EnableMetricsCollectionInput struct { // One or more of the following metrics. If you omit this parameter, all metrics // are enabled. // - // GroupMinSize + // GroupMinSize // - // GroupMaxSize + // GroupMaxSize // - // GroupDesiredCapacity + // GroupDesiredCapacity // - // GroupInServiceInstances + // GroupInServiceInstances // - // GroupPendingInstances + // GroupPendingInstances // - // GroupStandbyInstances + // GroupStandbyInstances // - // GroupTerminatingInstances + // GroupTerminatingInstances // - // GroupTotalInstances + // GroupTotalInstances // - // Note that the GroupStandbyInstances metric is not enabled by default. You - // must explicitly request this metric. + // Note that the GroupStandbyInstances metric is not enabled by default. + // You must explicitly request this metric. 
Metrics []*string `type:"list"` } @@ -5174,21 +5589,21 @@ type EnabledMetric struct { // One of the following metrics: // - // GroupMinSize + // GroupMinSize // - // GroupMaxSize + // GroupMaxSize // - // GroupDesiredCapacity + // GroupDesiredCapacity // - // GroupInServiceInstances + // GroupInServiceInstances // - // GroupPendingInstances + // GroupPendingInstances // - // GroupStandbyInstances + // GroupStandbyInstances // - // GroupTerminatingInstances + // GroupTerminatingInstances // - // GroupTotalInstances + // GroupTotalInstances Metric *string `min:"1" type:"string"` } @@ -5202,6 +5617,7 @@ func (s EnabledMetric) GoString() string { return s.String() } +// Contains the parameters for EnteStandby. type EnterStandbyInput struct { _ struct{} `type:"structure"` @@ -5248,6 +5664,7 @@ func (s *EnterStandbyInput) Validate() error { return nil } +// Contains the output of EnterStandby. type EnterStandbyOutput struct { _ struct{} `type:"structure"` @@ -5265,6 +5682,7 @@ func (s EnterStandbyOutput) GoString() string { return s.String() } +// Contains the parameters for ExecutePolicy. type ExecutePolicyInput struct { _ struct{} `type:"structure"` @@ -5284,7 +5702,7 @@ type ExecutePolicyInput struct { // This parameter is not supported if the policy type is StepScaling. // // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. HonorCooldown *bool `type:"boolean"` // The metric value to compare to BreachThreshold. This enables you to execute @@ -5347,6 +5765,7 @@ func (s ExecutePolicyOutput) GoString() string { return s.String() } +// Contains the parameters for ExitStandby. type ExitStandbyInput struct { _ struct{} `type:"structure"` @@ -5383,6 +5802,7 @@ func (s *ExitStandbyInput) Validate() error { return nil } +// Contains the parameters for ExitStandby. type ExitStandbyOutput struct { _ struct{} `type:"structure"` @@ -5488,6 +5908,9 @@ type Group struct { // The tags for the group. Tags []*TagDescription `type:"list"` + // The Amazon Resource Names (ARN) of the target groups for your load balancer. + TargetGroupARNs []*string `type:"list"` + // The termination policies for the group. TerminationPolicies []*string `type:"list"` @@ -5515,9 +5938,9 @@ type Instance struct { // The Availability Zone in which the instance is running. AvailabilityZone *string `min:"1" type:"string" required:"true"` - // The health status of the instance. "Healthy" means that the instance is healthy - // and should remain in service. "Unhealthy" means that the instance is unhealthy - // and Auto Scaling should terminate and replace it. + // The last reported health status of the instance. "Healthy" means that the + // instance is healthy and should remain in service. "Unhealthy" means that + // the instance is unhealthy and Auto Scaling should terminate and replace it. HealthStatus *string `min:"1" type:"string" required:"true"` // The ID of the instance. @@ -5555,9 +5978,9 @@ type InstanceDetails struct { // The Availability Zone for the instance. AvailabilityZone *string `min:"1" type:"string" required:"true"` - // The health status of this instance. "Healthy" means that the instance is - // healthy and should remain in service. "Unhealthy" means that the instance - // is unhealthy and Auto Scaling should terminate and replace it. + // The last reported health status of this instance. 
"Healthy" means that the + // instance is healthy and should remain in service. "Unhealthy" means that + // the instance is unhealthy and Auto Scaling should terminate and replace it. HealthStatus *string `min:"1" type:"string" required:"true"` // The ID of the instance. @@ -5568,7 +5991,7 @@ type InstanceDetails struct { // The lifecycle state for the instance. For more information, see Auto Scaling // Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. LifecycleState *string `min:"1" type:"string" required:"true"` // Indicates whether the instance is protected from termination by Auto Scaling @@ -5689,10 +6112,12 @@ func (s LaunchConfiguration) GoString() string { // an action when an instance launches or terminates. When you have a lifecycle // hook in place, the Auto Scaling group will either: // -// Pause the instance after it launches, but before it is put into service -// Pause the instance as it terminates, but before it is fully terminated For -// more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) -// in the Auto Scaling Developer Guide. +// Pause the instance after it launches, but before it is put into service +// +// Pause the instance as it terminates, but before it is fully terminated +// +// For more information, see Auto Scaling Lifecycle (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html) +// in the Auto Scaling User Guide. type LifecycleHook struct { _ struct{} `type:"structure"` @@ -5730,8 +6155,19 @@ type LifecycleHook struct { // can be either an SQS queue or an SNS topic. The notification message sent // to the target includes the following: // - // Lifecycle action token User account ID Name of the Auto Scaling group Lifecycle - // hook name EC2 instance ID Lifecycle transition Notification metadata + // Lifecycle action token + // + // User account ID + // + // Name of the Auto Scaling group + // + // Lifecycle hook name + // + // EC2 instance ID + // + // Lifecycle transition + // + // Notification metadata NotificationTargetARN *string `min:"1" type:"string"` // The ARN of the IAM role that allows the Auto Scaling group to publish to @@ -5749,7 +6185,17 @@ func (s LifecycleHook) GoString() string { return s.String() } -// Describes the state of a load balancer. +// Describes the state of a Classic load balancer. +// +// If you specify a load balancer when creating the Auto Scaling group, the +// state of the load balancer is InService. +// +// If you attach a load balancer to an existing Auto Scaling group, the initial +// state is Adding. The state transitions to Added after all instances in the +// group are registered with the load balancer. If ELB health checks are enabled +// for the load balancer, the state transitions to InService after at least +// one instance in the group passes the health check. If EC2 health checks are +// enabled instead, the load balancer remains in the Added state. type LoadBalancerState struct { _ struct{} `type:"structure"` @@ -5758,16 +6204,18 @@ type LoadBalancerState struct { // One of the following load balancer states: // - // Adding - The instances in the group are being registered with the load + // Adding - The instances in the group are being registered with the load // balancer. // - // Added - All instances in the group are registered with the load balancer. 
+ // Added - All instances in the group are registered with the load balancer. // - // InService - At least one instance in the group passed an ELB health check. + // InService - At least one instance in the group passed an ELB health check. // - // Removing - The instances are being deregistered from the load balancer. - // If connection draining is enabled, Elastic Load Balancing waits for in-flight - // requests to complete before deregistering the instances. + // Removing - The instances in the group are being deregistered from the + // load balancer. If connection draining is enabled, Elastic Load Balancing + // waits for in-flight requests to complete before deregistering the instances. + // + // Removed - All instances in the group are deregistered from the load balancer. State *string `min:"1" type:"string"` } @@ -5781,27 +6229,69 @@ func (s LoadBalancerState) GoString() string { return s.String() } +// Describes the state of a target group. +// +// If you attach a target group to an existing Auto Scaling group, the initial +// state is Adding. The state transitions to Added after all Auto Scaling instances +// are registered with the target group. If ELB health checks are enabled, the +// state transitions to InService after at least one Auto Scaling instance passes +// the health check. If EC2 health checks are enabled instead, the target group +// remains in the Added state. +type LoadBalancerTargetGroupState struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the target group. + LoadBalancerTargetGroupARN *string `min:"1" type:"string"` + + // The state of the target group. + // + // Adding - The Auto Scaling instances are being registered with the target + // group. + // + // Added - All Auto Scaling instances are registered with the target group. + // + // InService - At least one Auto Scaling instance passed an ELB health check. + // + // Removing - The Auto Scaling instances are being deregistered from the + // target group. If connection draining is enabled, Elastic Load Balancing waits + // for in-flight requests to complete before deregistering the instances. + // + // Removed - All Auto Scaling instances are deregistered from the target + // group. + State *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s LoadBalancerTargetGroupState) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoadBalancerTargetGroupState) GoString() string { + return s.String() +} + // Describes a metric. 
type MetricCollectionType struct { _ struct{} `type:"structure"` // One of the following metrics: // - // GroupMinSize + // GroupMinSize // - // GroupMaxSize + // GroupMaxSize // - // GroupDesiredCapacity + // GroupDesiredCapacity // - // GroupInServiceInstances + // GroupInServiceInstances // - // GroupPendingInstances + // GroupPendingInstances // - // GroupStandbyInstances + // GroupStandbyInstances // - // GroupTerminatingInstances + // GroupTerminatingInstances // - // GroupTotalInstances + // GroupTotalInstances Metric *string `min:"1" type:"string"` } @@ -5842,15 +6332,15 @@ type NotificationConfiguration struct { // One of the following event notification types: // - // autoscaling:EC2_INSTANCE_LAUNCH + // autoscaling:EC2_INSTANCE_LAUNCH // - // autoscaling:EC2_INSTANCE_LAUNCH_ERROR + // autoscaling:EC2_INSTANCE_LAUNCH_ERROR // - // autoscaling:EC2_INSTANCE_TERMINATE + // autoscaling:EC2_INSTANCE_TERMINATE // - // autoscaling:EC2_INSTANCE_TERMINATE_ERROR + // autoscaling:EC2_INSTANCE_TERMINATE_ERROR // - // autoscaling:TEST_NOTIFICATION + // autoscaling:TEST_NOTIFICATION NotificationType *string `min:"1" type:"string"` // The Amazon Resource Name (ARN) of the Amazon Simple Notification Service @@ -5871,27 +6361,27 @@ func (s NotificationConfiguration) GoString() string { // Describes a process type. // // For more information, see Auto Scaling Processes (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/US_SuspendResume.html#process-types) -// in the Auto Scaling Developer Guide. +// in the Auto Scaling User Guide. type ProcessType struct { _ struct{} `type:"structure"` // One of the following processes: // - // Launch + // Launch // - // Terminate + // Terminate // - // AddToLoadBalancer + // AddToLoadBalancer // - // AlarmNotification + // AlarmNotification // - // AZRebalance + // AZRebalance // - // HealthCheck + // HealthCheck // - // ReplaceUnhealthy + // ReplaceUnhealthy // - // ScheduledActions + // ScheduledActions ProcessName *string `min:"1" type:"string" required:"true"` } @@ -5905,6 +6395,7 @@ func (s ProcessType) GoString() string { return s.String() } +// Contains the parameters for PutLifecycleHook. type PutLifecycleHookInput struct { _ struct{} `type:"structure"` @@ -5944,11 +6435,21 @@ type PutLifecycleHookInput struct { // // The notification messages sent to the target include the following information: // - // AutoScalingGroupName. The name of the Auto Scaling group. AccountId. The - // AWS account ID. LifecycleTransition. The lifecycle hook type. LifecycleActionToken. - // The lifecycle action token. EC2InstanceId. The EC2 instance ID. LifecycleHookName. - // The name of the lifecycle hook. NotificationMetadata. User-defined information. - // This operation uses the JSON format when sending notifications to an Amazon + // AutoScalingGroupName. The name of the Auto Scaling group. + // + // AccountId. The AWS account ID. + // + // LifecycleTransition. The lifecycle hook type. + // + // LifecycleActionToken. The lifecycle action token. + // + // EC2InstanceId. The EC2 instance ID. + // + // LifecycleHookName. The name of the lifecycle hook. + // + // NotificationMetadata. User-defined information. + // + // This operation uses the JSON format when sending notifications to an Amazon // SQS queue, and an email key/value pair format when sending notifications // to an Amazon SNS topic. // @@ -6003,6 +6504,7 @@ func (s *PutLifecycleHookInput) Validate() error { return nil } +// Contains the output of PutLifecycleHook. 
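For context, a minimal sketch of registering a launch lifecycle hook whose notification message carries the fields listed above; the group name, queue ARN, and role ARN are hypothetical placeholders, and the field set follows the PutLifecycleHookInput type as shipped in this SDK.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	_, err := svc.PutLifecycleHook(&autoscaling.PutLifecycleHookInput{
		AutoScalingGroupName:  aws.String("my-asg"),             // hypothetical
		LifecycleHookName:     aws.String("wait-for-bootstrap"), // hypothetical
		LifecycleTransition:   aws.String("autoscaling:EC2_INSTANCE_LAUNCHING"),
		NotificationTargetARN: aws.String("arn:aws:sqs:us-east-1:123456789012:bootstrap-queue"), // hypothetical
		RoleARN:               aws.String("arn:aws:iam::123456789012:role/asg-hook-role"),       // hypothetical
		HeartbeatTimeout:      aws.Int64(600),
		DefaultResult:         aws.String("CONTINUE"),
	})
	if err != nil {
		log.Fatal(err)
	}
}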
type PutLifecycleHookOutput struct { _ struct{} `type:"structure"` } @@ -6017,6 +6519,7 @@ func (s PutLifecycleHookOutput) GoString() string { return s.String() } +// Contains the parameters for PutNotificationConfiguration. type PutNotificationConfigurationInput struct { _ struct{} `type:"structure"` @@ -6081,6 +6584,7 @@ func (s PutNotificationConfigurationOutput) GoString() string { return s.String() } +// Contains the parameters for PutScalingPolicy. type PutScalingPolicyInput struct { _ struct{} `type:"structure"` @@ -6088,7 +6592,7 @@ type PutScalingPolicyInput struct { // PercentChangeInCapacity. // // For more information, see Dynamic Scaling (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. AdjustmentType *string `min:"1" type:"string" required:"true"` // The name or ARN of the group. @@ -6101,7 +6605,7 @@ type PutScalingPolicyInput struct { // This parameter is not supported unless the policy type is SimpleScaling. // // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. Cooldown *int64 `type:"integer"` // The estimated time, in seconds, until a newly launched instance can contribute @@ -6204,6 +6708,7 @@ func (s *PutScalingPolicyInput) Validate() error { return nil } +// Contains the output of PutScalingPolicy. type PutScalingPolicyOutput struct { _ struct{} `type:"structure"` @@ -6221,6 +6726,7 @@ func (s PutScalingPolicyOutput) GoString() string { return s.String() } +// Contains the parameters for PutScheduledUpdateGroupAction. type PutScheduledUpdateGroupActionInput struct { _ struct{} `type:"structure"` @@ -6313,6 +6819,7 @@ func (s PutScheduledUpdateGroupActionOutput) GoString() string { return s.String() } +// Contains the parameters for RecordLifecycleActionHeartbeat. type RecordLifecycleActionHeartbeatInput struct { _ struct{} `type:"structure"` @@ -6369,6 +6876,7 @@ func (s *RecordLifecycleActionHeartbeatInput) Validate() error { return nil } +// Contains the output of RecordLifecycleActionHeartBeat. type RecordLifecycleActionHeartbeatOutput struct { _ struct{} `type:"structure"` } @@ -6461,29 +6969,31 @@ func (s ScalingPolicy) GoString() string { return s.String() } +// Contains the parameters for SuspendProcesses and ResumeProcesses. type ScalingProcessQuery struct { _ struct{} `type:"structure"` // The name or Amazon Resource Name (ARN) of the Auto Scaling group. AutoScalingGroupName *string `min:"1" type:"string" required:"true"` - // One or more of the following processes: + // One or more of the following processes. If you omit this parameter, all processes + // are specified. // - // Launch + // Launch // - // Terminate + // Terminate // - // HealthCheck + // HealthCheck // - // ReplaceUnhealthy + // ReplaceUnhealthy // - // AZRebalance + // AZRebalance // - // AlarmNotification + // AlarmNotification // - // ScheduledActions + // ScheduledActions // - // AddToLoadBalancer + // AddToLoadBalancer ScalingProcesses []*string `type:"list"` } @@ -6563,6 +7073,7 @@ func (s ScheduledUpdateGroupAction) GoString() string { return s.String() } +// Contains the parameters for SetDesiredCapacity. 
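For context, a minimal sketch of SuspendProcesses and ResumeProcesses driven by the ScalingProcessQuery type documented above; the group name is a placeholder, and omitting ScalingProcesses would affect all of the listed processes.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
)

func main() {
	svc := autoscaling.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))

	query := &autoscaling.ScalingProcessQuery{
		AutoScalingGroupName: aws.String("my-asg"), // hypothetical
		// Only the named processes are affected; leaving ScalingProcesses
		// unset would suspend or resume every process.
		ScalingProcesses: []*string{
			aws.String("AlarmNotification"),
			aws.String("ScheduledActions"),
		},
	}

	// Suspend the processes, e.g. around a maintenance window...
	if _, err := svc.SuspendProcesses(query); err != nil {
		log.Fatal(err)
	}
	// ...and resume them afterwards with the same query.
	if _, err := svc.ResumeProcesses(query); err != nil {
		log.Fatal(err)
	}
}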
type SetDesiredCapacityInput struct { _ struct{} `type:"structure"` @@ -6622,6 +7133,7 @@ func (s SetDesiredCapacityOutput) GoString() string { return s.String() } +// Contains the parameters for SetInstanceHealth. type SetInstanceHealthInput struct { _ struct{} `type:"structure"` @@ -6689,6 +7201,7 @@ func (s SetInstanceHealthOutput) GoString() string { return s.String() } +// Contains the parameters for SetInstanceProtection. type SetInstanceProtectionInput struct { _ struct{} `type:"structure"` @@ -6735,6 +7248,7 @@ func (s *SetInstanceProtectionInput) Validate() error { return nil } +// Contains the output of SetInstanceProtection. type SetInstanceProtectionOutput struct { _ struct{} `type:"structure"` } @@ -6940,6 +7454,7 @@ func (s TagDescription) GoString() string { return s.String() } +// Contains the parameters for TerminateInstanceInAutoScalingGroup. type TerminateInstanceInAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -6980,6 +7495,7 @@ func (s *TerminateInstanceInAutoScalingGroupInput) Validate() error { return nil } +// Contains the output of TerminateInstancesInAutoScalingGroup. type TerminateInstanceInAutoScalingGroupOutput struct { _ struct{} `type:"structure"` @@ -6997,6 +7513,7 @@ func (s TerminateInstanceInAutoScalingGroupOutput) GoString() string { return s.String() } +// Contains the parameters for UpdateAutoScalingGroup. type UpdateAutoScalingGroupInput struct { _ struct{} `type:"structure"` @@ -7010,7 +7527,7 @@ type UpdateAutoScalingGroupInput struct { // another scaling activity can start. The default is 300. // // For more information, see Auto Scaling Cooldowns (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/Cooldown.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. DefaultCooldown *int64 `type:"integer"` // The number of EC2 instances that should be running in the Auto Scaling group. @@ -7020,10 +7537,10 @@ type UpdateAutoScalingGroupInput struct { // The amount of time, in seconds, that Auto Scaling waits before checking the // health status of an EC2 instance that has come into service. The default - // is 300. + // is 0. // // For more information, see Health Checks (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/healthcheck.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. HealthCheckGracePeriod *int64 `type:"integer"` // The service to use for the health checks. The valid values are EC2 and ELB. @@ -7053,7 +7570,7 @@ type UpdateAutoScalingGroupInput struct { // // For more information, see Controlling Which Instances Auto Scaling Terminates // During Scale In (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingBehavior.InstanceTermination.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. TerminationPolicies []*string `type:"list"` // The ID of the subnet, if you are launching into a VPC. You can specify several @@ -7063,7 +7580,7 @@ type UpdateAutoScalingGroupInput struct { // subnets' Availability Zones match the values you specify for AvailabilityZones. // // For more information, see Launching Auto Scaling Instances in a VPC (http://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/asg-in-vpc.html) - // in the Auto Scaling Developer Guide. + // in the Auto Scaling User Guide. 
VPCZoneIdentifier *string `min:"1" type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go index f9bd33f6b..b6058d77a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/api.go @@ -13,7 +13,7 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_01_28" +const opCreateCloudFrontOriginAccessIdentity = "CreateCloudFrontOriginAccessIdentity2016_08_01" // CreateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the CreateCloudFrontOriginAccessIdentity operation. The "output" return @@ -41,7 +41,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentityRequest(input *CreateCl op := &request.Operation{ Name: opCreateCloudFrontOriginAccessIdentity, HTTPMethod: "POST", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront", } if input == nil { @@ -61,7 +61,7 @@ func (c *CloudFront) CreateCloudFrontOriginAccessIdentity(input *CreateCloudFron return out, err } -const opCreateDistribution = "CreateDistribution2016_01_28" +const opCreateDistribution = "CreateDistribution2016_08_01" // CreateDistributionRequest generates a "aws/request.Request" representing the // client's request for the CreateDistribution operation. The "output" return @@ -89,7 +89,7 @@ func (c *CloudFront) CreateDistributionRequest(input *CreateDistributionInput) ( op := &request.Operation{ Name: opCreateDistribution, HTTPMethod: "POST", - HTTPPath: "/2016-01-28/distribution", + HTTPPath: "/2016-08-01/distribution", } if input == nil { @@ -109,7 +109,55 @@ func (c *CloudFront) CreateDistribution(input *CreateDistributionInput) (*Create return out, err } -const opCreateInvalidation = "CreateInvalidation2016_01_28" +const opCreateDistributionWithTags = "CreateDistributionWithTags2016_08_01" + +// CreateDistributionWithTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateDistributionWithTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateDistributionWithTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateDistributionWithTagsRequest method. 
+// req, resp := client.CreateDistributionWithTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateDistributionWithTagsRequest(input *CreateDistributionWithTagsInput) (req *request.Request, output *CreateDistributionWithTagsOutput) { + op := &request.Operation{ + Name: opCreateDistributionWithTags, + HTTPMethod: "POST", + HTTPPath: "/2016-08-01/distribution?WithTags", + } + + if input == nil { + input = &CreateDistributionWithTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateDistributionWithTagsOutput{} + req.Data = output + return +} + +// Create a new distribution with tags. +func (c *CloudFront) CreateDistributionWithTags(input *CreateDistributionWithTagsInput) (*CreateDistributionWithTagsOutput, error) { + req, out := c.CreateDistributionWithTagsRequest(input) + err := req.Send() + return out, err +} + +const opCreateInvalidation = "CreateInvalidation2016_08_01" // CreateInvalidationRequest generates a "aws/request.Request" representing the // client's request for the CreateInvalidation operation. The "output" return @@ -137,7 +185,7 @@ func (c *CloudFront) CreateInvalidationRequest(input *CreateInvalidationInput) ( op := &request.Operation{ Name: opCreateInvalidation, HTTPMethod: "POST", - HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + HTTPPath: "/2016-08-01/distribution/{DistributionId}/invalidation", } if input == nil { @@ -157,7 +205,7 @@ func (c *CloudFront) CreateInvalidation(input *CreateInvalidationInput) (*Create return out, err } -const opCreateStreamingDistribution = "CreateStreamingDistribution2016_01_28" +const opCreateStreamingDistribution = "CreateStreamingDistribution2016_08_01" // CreateStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the CreateStreamingDistribution operation. The "output" return @@ -185,7 +233,7 @@ func (c *CloudFront) CreateStreamingDistributionRequest(input *CreateStreamingDi op := &request.Operation{ Name: opCreateStreamingDistribution, HTTPMethod: "POST", - HTTPPath: "/2016-01-28/streaming-distribution", + HTTPPath: "/2016-08-01/streaming-distribution", } if input == nil { @@ -205,7 +253,55 @@ func (c *CloudFront) CreateStreamingDistribution(input *CreateStreamingDistribut return out, err } -const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_01_28" +const opCreateStreamingDistributionWithTags = "CreateStreamingDistributionWithTags2016_08_01" + +// CreateStreamingDistributionWithTagsRequest generates a "aws/request.Request" representing the +// client's request for the CreateStreamingDistributionWithTags operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateStreamingDistributionWithTags method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateStreamingDistributionWithTagsRequest method. 
+// req, resp := client.CreateStreamingDistributionWithTagsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) CreateStreamingDistributionWithTagsRequest(input *CreateStreamingDistributionWithTagsInput) (req *request.Request, output *CreateStreamingDistributionWithTagsOutput) { + op := &request.Operation{ + Name: opCreateStreamingDistributionWithTags, + HTTPMethod: "POST", + HTTPPath: "/2016-08-01/streaming-distribution?WithTags", + } + + if input == nil { + input = &CreateStreamingDistributionWithTagsInput{} + } + + req = c.newRequest(op, input, output) + output = &CreateStreamingDistributionWithTagsOutput{} + req.Data = output + return +} + +// Create a new streaming distribution with tags. +func (c *CloudFront) CreateStreamingDistributionWithTags(input *CreateStreamingDistributionWithTagsInput) (*CreateStreamingDistributionWithTagsOutput, error) { + req, out := c.CreateStreamingDistributionWithTagsRequest(input) + err := req.Send() + return out, err +} + +const opDeleteCloudFrontOriginAccessIdentity = "DeleteCloudFrontOriginAccessIdentity2016_08_01" // DeleteCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the DeleteCloudFrontOriginAccessIdentity operation. The "output" return @@ -233,7 +329,7 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentityRequest(input *DeleteCl op := &request.Operation{ Name: opDeleteCloudFrontOriginAccessIdentity, HTTPMethod: "DELETE", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront/{Id}", } if input == nil { @@ -255,7 +351,7 @@ func (c *CloudFront) DeleteCloudFrontOriginAccessIdentity(input *DeleteCloudFron return out, err } -const opDeleteDistribution = "DeleteDistribution2016_01_28" +const opDeleteDistribution = "DeleteDistribution2016_08_01" // DeleteDistributionRequest generates a "aws/request.Request" representing the // client's request for the DeleteDistribution operation. The "output" return @@ -283,7 +379,7 @@ func (c *CloudFront) DeleteDistributionRequest(input *DeleteDistributionInput) ( op := &request.Operation{ Name: opDeleteDistribution, HTTPMethod: "DELETE", - HTTPPath: "/2016-01-28/distribution/{Id}", + HTTPPath: "/2016-08-01/distribution/{Id}", } if input == nil { @@ -305,7 +401,7 @@ func (c *CloudFront) DeleteDistribution(input *DeleteDistributionInput) (*Delete return out, err } -const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_01_28" +const opDeleteStreamingDistribution = "DeleteStreamingDistribution2016_08_01" // DeleteStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the DeleteStreamingDistribution operation. 
The "output" return @@ -333,7 +429,7 @@ func (c *CloudFront) DeleteStreamingDistributionRequest(input *DeleteStreamingDi op := &request.Operation{ Name: opDeleteStreamingDistribution, HTTPMethod: "DELETE", - HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + HTTPPath: "/2016-08-01/streaming-distribution/{Id}", } if input == nil { @@ -355,7 +451,7 @@ func (c *CloudFront) DeleteStreamingDistribution(input *DeleteStreamingDistribut return out, err } -const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_01_28" +const opGetCloudFrontOriginAccessIdentity = "GetCloudFrontOriginAccessIdentity2016_08_01" // GetCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the GetCloudFrontOriginAccessIdentity operation. The "output" return @@ -383,7 +479,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityRequest(input *GetCloudFro op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentity, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront/{Id}", } if input == nil { @@ -403,7 +499,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentity(input *GetCloudFrontOrigi return out, err } -const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_01_28" +const opGetCloudFrontOriginAccessIdentityConfig = "GetCloudFrontOriginAccessIdentityConfig2016_08_01" // GetCloudFrontOriginAccessIdentityConfigRequest generates a "aws/request.Request" representing the // client's request for the GetCloudFrontOriginAccessIdentityConfig operation. The "output" return @@ -431,7 +527,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfigRequest(input *GetCl op := &request.Operation{ Name: opGetCloudFrontOriginAccessIdentityConfig, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront/{Id}/config", } if input == nil { @@ -451,7 +547,7 @@ func (c *CloudFront) GetCloudFrontOriginAccessIdentityConfig(input *GetCloudFron return out, err } -const opGetDistribution = "GetDistribution2016_01_28" +const opGetDistribution = "GetDistribution2016_08_01" // GetDistributionRequest generates a "aws/request.Request" representing the // client's request for the GetDistribution operation. The "output" return @@ -479,7 +575,7 @@ func (c *CloudFront) GetDistributionRequest(input *GetDistributionInput) (req *r op := &request.Operation{ Name: opGetDistribution, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distribution/{Id}", + HTTPPath: "/2016-08-01/distribution/{Id}", } if input == nil { @@ -499,7 +595,7 @@ func (c *CloudFront) GetDistribution(input *GetDistributionInput) (*GetDistribut return out, err } -const opGetDistributionConfig = "GetDistributionConfig2016_01_28" +const opGetDistributionConfig = "GetDistributionConfig2016_08_01" // GetDistributionConfigRequest generates a "aws/request.Request" representing the // client's request for the GetDistributionConfig operation. 
The "output" return @@ -527,7 +623,7 @@ func (c *CloudFront) GetDistributionConfigRequest(input *GetDistributionConfigIn op := &request.Operation{ Name: opGetDistributionConfig, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distribution/{Id}/config", + HTTPPath: "/2016-08-01/distribution/{Id}/config", } if input == nil { @@ -547,7 +643,7 @@ func (c *CloudFront) GetDistributionConfig(input *GetDistributionConfigInput) (* return out, err } -const opGetInvalidation = "GetInvalidation2016_01_28" +const opGetInvalidation = "GetInvalidation2016_08_01" // GetInvalidationRequest generates a "aws/request.Request" representing the // client's request for the GetInvalidation operation. The "output" return @@ -575,7 +671,7 @@ func (c *CloudFront) GetInvalidationRequest(input *GetInvalidationInput) (req *r op := &request.Operation{ Name: opGetInvalidation, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation/{Id}", + HTTPPath: "/2016-08-01/distribution/{DistributionId}/invalidation/{Id}", } if input == nil { @@ -595,7 +691,7 @@ func (c *CloudFront) GetInvalidation(input *GetInvalidationInput) (*GetInvalidat return out, err } -const opGetStreamingDistribution = "GetStreamingDistribution2016_01_28" +const opGetStreamingDistribution = "GetStreamingDistribution2016_08_01" // GetStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the GetStreamingDistribution operation. The "output" return @@ -623,7 +719,7 @@ func (c *CloudFront) GetStreamingDistributionRequest(input *GetStreamingDistribu op := &request.Operation{ Name: opGetStreamingDistribution, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/streaming-distribution/{Id}", + HTTPPath: "/2016-08-01/streaming-distribution/{Id}", } if input == nil { @@ -643,7 +739,7 @@ func (c *CloudFront) GetStreamingDistribution(input *GetStreamingDistributionInp return out, err } -const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_01_28" +const opGetStreamingDistributionConfig = "GetStreamingDistributionConfig2016_08_01" // GetStreamingDistributionConfigRequest generates a "aws/request.Request" representing the // client's request for the GetStreamingDistributionConfig operation. The "output" return @@ -671,7 +767,7 @@ func (c *CloudFront) GetStreamingDistributionConfigRequest(input *GetStreamingDi op := &request.Operation{ Name: opGetStreamingDistributionConfig, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config", + HTTPPath: "/2016-08-01/streaming-distribution/{Id}/config", } if input == nil { @@ -691,7 +787,7 @@ func (c *CloudFront) GetStreamingDistributionConfig(input *GetStreamingDistribut return out, err } -const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_01_28" +const opListCloudFrontOriginAccessIdentities = "ListCloudFrontOriginAccessIdentities2016_08_01" // ListCloudFrontOriginAccessIdentitiesRequest generates a "aws/request.Request" representing the // client's request for the ListCloudFrontOriginAccessIdentities operation. 
The "output" return @@ -719,7 +815,7 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesRequest(input *ListClou op := &request.Operation{ Name: opListCloudFrontOriginAccessIdentities, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"CloudFrontOriginAccessIdentityList.NextMarker"}, @@ -770,7 +866,7 @@ func (c *CloudFront) ListCloudFrontOriginAccessIdentitiesPages(input *ListCloudF }) } -const opListDistributions = "ListDistributions2016_01_28" +const opListDistributions = "ListDistributions2016_08_01" // ListDistributionsRequest generates a "aws/request.Request" representing the // client's request for the ListDistributions operation. The "output" return @@ -798,7 +894,7 @@ func (c *CloudFront) ListDistributionsRequest(input *ListDistributionsInput) (re op := &request.Operation{ Name: opListDistributions, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distribution", + HTTPPath: "/2016-08-01/distribution", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"DistributionList.NextMarker"}, @@ -849,7 +945,7 @@ func (c *CloudFront) ListDistributionsPages(input *ListDistributionsInput, fn fu }) } -const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_01_28" +const opListDistributionsByWebACLId = "ListDistributionsByWebACLId2016_08_01" // ListDistributionsByWebACLIdRequest generates a "aws/request.Request" representing the // client's request for the ListDistributionsByWebACLId operation. The "output" return @@ -877,7 +973,7 @@ func (c *CloudFront) ListDistributionsByWebACLIdRequest(input *ListDistributions op := &request.Operation{ Name: opListDistributionsByWebACLId, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distributionsByWebACLId/{WebACLId}", + HTTPPath: "/2016-08-01/distributionsByWebACLId/{WebACLId}", } if input == nil { @@ -897,7 +993,7 @@ func (c *CloudFront) ListDistributionsByWebACLId(input *ListDistributionsByWebAC return out, err } -const opListInvalidations = "ListInvalidations2016_01_28" +const opListInvalidations = "ListInvalidations2016_08_01" // ListInvalidationsRequest generates a "aws/request.Request" representing the // client's request for the ListInvalidations operation. The "output" return @@ -925,7 +1021,7 @@ func (c *CloudFront) ListInvalidationsRequest(input *ListInvalidationsInput) (re op := &request.Operation{ Name: opListInvalidations, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/distribution/{DistributionId}/invalidation", + HTTPPath: "/2016-08-01/distribution/{DistributionId}/invalidation", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"InvalidationList.NextMarker"}, @@ -976,7 +1072,7 @@ func (c *CloudFront) ListInvalidationsPages(input *ListInvalidationsInput, fn fu }) } -const opListStreamingDistributions = "ListStreamingDistributions2016_01_28" +const opListStreamingDistributions = "ListStreamingDistributions2016_08_01" // ListStreamingDistributionsRequest generates a "aws/request.Request" representing the // client's request for the ListStreamingDistributions operation. 
The "output" return @@ -1004,7 +1100,7 @@ func (c *CloudFront) ListStreamingDistributionsRequest(input *ListStreamingDistr op := &request.Operation{ Name: opListStreamingDistributions, HTTPMethod: "GET", - HTTPPath: "/2016-01-28/streaming-distribution", + HTTPPath: "/2016-08-01/streaming-distribution", Paginator: &request.Paginator{ InputTokens: []string{"Marker"}, OutputTokens: []string{"StreamingDistributionList.NextMarker"}, @@ -1055,7 +1151,155 @@ func (c *CloudFront) ListStreamingDistributionsPages(input *ListStreamingDistrib }) } -const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_01_28" +const opListTagsForResource = "ListTagsForResource2016_08_01" + +// ListTagsForResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsForResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListTagsForResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListTagsForResourceRequest method. +// req, resp := client.ListTagsForResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) ListTagsForResourceRequest(input *ListTagsForResourceInput) (req *request.Request, output *ListTagsForResourceOutput) { + op := &request.Operation{ + Name: opListTagsForResource, + HTTPMethod: "GET", + HTTPPath: "/2016-08-01/tagging", + } + + if input == nil { + input = &ListTagsForResourceInput{} + } + + req = c.newRequest(op, input, output) + output = &ListTagsForResourceOutput{} + req.Data = output + return +} + +// List tags for a CloudFront resource. +func (c *CloudFront) ListTagsForResource(input *ListTagsForResourceInput) (*ListTagsForResourceOutput, error) { + req, out := c.ListTagsForResourceRequest(input) + err := req.Send() + return out, err +} + +const opTagResource = "TagResource2016_08_01" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the TagResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the TagResourceRequest method. 
+// req, resp := client.TagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { + op := &request.Operation{ + Name: opTagResource, + HTTPMethod: "POST", + HTTPPath: "/2016-08-01/tagging?Operation=Tag", + } + + if input == nil { + input = &TagResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &TagResourceOutput{} + req.Data = output + return +} + +// Add tags to a CloudFront resource. +func (c *CloudFront) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) + err := req.Send() + return out, err +} + +const opUntagResource = "UntagResource2016_08_01" + +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UntagResource method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *CloudFront) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { + op := &request.Operation{ + Name: opUntagResource, + HTTPMethod: "POST", + HTTPPath: "/2016-08-01/tagging?Operation=Untag", + } + + if input == nil { + input = &UntagResourceInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &UntagResourceOutput{} + req.Data = output + return +} + +// Remove tags from a CloudFront resource. +func (c *CloudFront) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) + err := req.Send() + return out, err +} + +const opUpdateCloudFrontOriginAccessIdentity = "UpdateCloudFrontOriginAccessIdentity2016_08_01" // UpdateCloudFrontOriginAccessIdentityRequest generates a "aws/request.Request" representing the // client's request for the UpdateCloudFrontOriginAccessIdentity operation. 
The "output" return @@ -1083,7 +1327,7 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentityRequest(input *UpdateCl op := &request.Operation{ Name: opUpdateCloudFrontOriginAccessIdentity, HTTPMethod: "PUT", - HTTPPath: "/2016-01-28/origin-access-identity/cloudfront/{Id}/config", + HTTPPath: "/2016-08-01/origin-access-identity/cloudfront/{Id}/config", } if input == nil { @@ -1103,7 +1347,7 @@ func (c *CloudFront) UpdateCloudFrontOriginAccessIdentity(input *UpdateCloudFron return out, err } -const opUpdateDistribution = "UpdateDistribution2016_01_28" +const opUpdateDistribution = "UpdateDistribution2016_08_01" // UpdateDistributionRequest generates a "aws/request.Request" representing the // client's request for the UpdateDistribution operation. The "output" return @@ -1131,7 +1375,7 @@ func (c *CloudFront) UpdateDistributionRequest(input *UpdateDistributionInput) ( op := &request.Operation{ Name: opUpdateDistribution, HTTPMethod: "PUT", - HTTPPath: "/2016-01-28/distribution/{Id}/config", + HTTPPath: "/2016-08-01/distribution/{Id}/config", } if input == nil { @@ -1151,7 +1395,7 @@ func (c *CloudFront) UpdateDistribution(input *UpdateDistributionInput) (*Update return out, err } -const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_01_28" +const opUpdateStreamingDistribution = "UpdateStreamingDistribution2016_08_01" // UpdateStreamingDistributionRequest generates a "aws/request.Request" representing the // client's request for the UpdateStreamingDistribution operation. The "output" return @@ -1179,7 +1423,7 @@ func (c *CloudFront) UpdateStreamingDistributionRequest(input *UpdateStreamingDi op := &request.Operation{ Name: opUpdateStreamingDistribution, HTTPMethod: "PUT", - HTTPPath: "/2016-01-28/streaming-distribution/{Id}/config", + HTTPPath: "/2016-08-01/streaming-distribution/{Id}/config", } if input == nil { @@ -1785,6 +2029,67 @@ func (s CreateDistributionOutput) GoString() string { return s.String() } +// The request to create a new distribution with tags +type CreateDistributionWithTagsInput struct { + _ struct{} `type:"structure" payload:"DistributionConfigWithTags"` + + // The distribution's configuration information. + DistributionConfigWithTags *DistributionConfigWithTags `locationName:"DistributionConfigWithTags" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateDistributionWithTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionWithTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateDistributionWithTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateDistributionWithTagsInput"} + if s.DistributionConfigWithTags == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfigWithTags")) + } + if s.DistributionConfigWithTags != nil { + if err := s.DistributionConfigWithTags.Validate(); err != nil { + invalidParams.AddNested("DistributionConfigWithTags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateDistributionWithTagsOutput struct { + _ struct{} `type:"structure" payload:"Distribution"` + + // The distribution's information. + Distribution *Distribution `type:"structure"` + + // The current version of the distribution created. 
+ ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new distribution resource just created. For + // example: https://cloudfront.amazonaws.com/2010-11-01/distribution/EDFDVBD632BHDS5. + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateDistributionWithTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateDistributionWithTagsOutput) GoString() string { + return s.String() +} + // The request to create an invalidation. type CreateInvalidationInput struct { _ struct{} `type:"structure" payload:"InvalidationBatch"` @@ -1910,6 +2215,67 @@ func (s CreateStreamingDistributionOutput) GoString() string { return s.String() } +// The request to create a new streaming distribution with tags. +type CreateStreamingDistributionWithTagsInput struct { + _ struct{} `type:"structure" payload:"StreamingDistributionConfigWithTags"` + + // The streaming distribution's configuration information. + StreamingDistributionConfigWithTags *StreamingDistributionConfigWithTags `locationName:"StreamingDistributionConfigWithTags" type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateStreamingDistributionWithTagsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionWithTagsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateStreamingDistributionWithTagsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateStreamingDistributionWithTagsInput"} + if s.StreamingDistributionConfigWithTags == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfigWithTags")) + } + if s.StreamingDistributionConfigWithTags != nil { + if err := s.StreamingDistributionConfigWithTags.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfigWithTags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type CreateStreamingDistributionWithTagsOutput struct { + _ struct{} `type:"structure" payload:"StreamingDistribution"` + + // The current version of the streaming distribution created. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // The fully qualified URI of the new streaming distribution resource just created. + // For example: https://cloudfront.amazonaws.com/2010-11-01/streaming-distribution/EGTXBD79H29TRA8. + Location *string `location:"header" locationName:"Location" type:"string"` + + // The streaming distribution's information. + StreamingDistribution *StreamingDistribution `type:"structure"` +} + +// String returns the string representation +func (s CreateStreamingDistributionWithTagsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateStreamingDistributionWithTagsOutput) GoString() string { + return s.String() +} + // A complex type that describes how you'd prefer CloudFront to respond to requests // that result in either a 4xx or 5xx response. 
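[Editor's note: the Validate methods added in this hunk compose, so validating a DistributionConfigWithTags surfaces missing required members of both the embedded DistributionConfig and the Tags set. A small illustrative sketch; the helper name is hypothetical and the zero-value input is deliberately invalid.]

import (
	"fmt"

	"github.com/aws/aws-sdk-go/service/cloudfront"
)

// checkConfigWithTags is a hypothetical helper showing the composed
// validation: DistributionConfig and Tags are both required, and nested
// Validate errors are re-attached via AddNested.
func checkConfigWithTags(c *cloudfront.DistributionConfigWithTags) {
	if err := c.Validate(); err != nil {
		// For a zero-value input this prints an ErrInvalidParams naming
		// both the missing DistributionConfig and the missing Tags.
		fmt.Println("invalid config:", err)
	}
}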
You can control whether a custom // error page should be displayed, what the desired response code should be @@ -2406,6 +2772,10 @@ func (s DeleteStreamingDistributionOutput) GoString() string { type Distribution struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account Id. + ARN *string `type:"string" required:"true"` + // CloudFront automatically adds this element to the response only if you've // set up the distribution to serve private content with signed URLs. The element // lists the key pair IDs that CloudFront is aware of for each trusted signer. @@ -2586,6 +2956,54 @@ func (s *DistributionConfig) Validate() error { return nil } +// A distribution Configuration and a list of tags to be associated with the +// distribution. +type DistributionConfigWithTags struct { + _ struct{} `type:"structure"` + + // A distribution Configuration. + DistributionConfig *DistributionConfig `type:"structure" required:"true"` + + // A complex type that contains zero or more Tag elements. + Tags *Tags `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DistributionConfigWithTags) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DistributionConfigWithTags) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DistributionConfigWithTags) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DistributionConfigWithTags"} + if s.DistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("DistributionConfig")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.DistributionConfig != nil { + if err := s.DistributionConfig.Validate(); err != nil { + invalidParams.AddNested("DistributionConfig", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + if err := s.Tags.Validate(); err != nil { + invalidParams.AddNested("Tags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A distribution list. type DistributionList struct { _ struct{} `type:"structure"` @@ -2629,6 +3047,10 @@ func (s DistributionList) GoString() string { type DistributionSummary struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account Id. + ARN *string `type:"string" required:"true"` + // A complex type that contains information about CNAMEs (alternate domain names), // if any, for this distribution. Aliases *Aliases `type:"structure" required:"true"` @@ -3646,6 +4068,55 @@ func (s ListStreamingDistributionsOutput) GoString() string { return s.String() } +// The request to list tags for a CloudFront resource. +type ListTagsForResourceInput struct { + _ struct{} `type:"structure"` + + // An ARN of a CloudFront resource. 
+ Resource *string `location:"querystring" locationName:"Resource" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListTagsForResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListTagsForResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// The returned result of the corresponding request. +type ListTagsForResourceOutput struct { + _ struct{} `type:"structure" payload:"Tags"` + + // A complex type that contains zero or more Tag elements. + Tags *Tags `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ListTagsForResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListTagsForResourceOutput) GoString() string { + return s.String() +} + // A complex type that controls whether access logs are written for the distribution. type LoggingConfig struct { _ struct{} `type:"structure"` @@ -4238,6 +4709,11 @@ func (s Signer) GoString() string { type StreamingDistribution struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) for the streaming distribution. For example: + // arn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account Id. + ARN *string `type:"string" required:"true"` + // CloudFront automatically adds this element to the response only if you've // set up the distribution to serve private content with signed URLs. The element // lists the key pair IDs that CloudFront is aware of for each trusted signer. @@ -4385,6 +4861,54 @@ func (s *StreamingDistributionConfig) Validate() error { return nil } +// A streaming distribution Configuration and a list of tags to be associated +// with the streaming distribution. +type StreamingDistributionConfigWithTags struct { + _ struct{} `type:"structure"` + + // A streaming distribution Configuration. + StreamingDistributionConfig *StreamingDistributionConfig `type:"structure" required:"true"` + + // A complex type that contains zero or more Tag elements. + Tags *Tags `type:"structure" required:"true"` +} + +// String returns the string representation +func (s StreamingDistributionConfigWithTags) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StreamingDistributionConfigWithTags) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
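[Editor's note: a sketch of reading tags back with ListTagsForResource, whose input and output shapes are defined in this hunk; the wrapper method itself is generated elsewhere in the patch. The helper name and ARN argument are placeholders.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

// listResourceTags is a hypothetical helper that returns the tags attached
// to a CloudFront resource identified by its ARN.
func listResourceTags(svc *cloudfront.CloudFront, arn string) (*cloudfront.Tags, error) {
	out, err := svc.ListTagsForResource(&cloudfront.ListTagsForResourceInput{
		Resource: aws.String(arn),
	})
	if err != nil {
		return nil, err
	}
	return out.Tags, nil
}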
+func (s *StreamingDistributionConfigWithTags) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StreamingDistributionConfigWithTags"} + if s.StreamingDistributionConfig == nil { + invalidParams.Add(request.NewErrParamRequired("StreamingDistributionConfig")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.StreamingDistributionConfig != nil { + if err := s.StreamingDistributionConfig.Validate(); err != nil { + invalidParams.AddNested("StreamingDistributionConfig", err.(request.ErrInvalidParams)) + } + } + if s.Tags != nil { + if err := s.Tags.Validate(); err != nil { + invalidParams.AddNested("Tags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A streaming distribution list. type StreamingDistributionList struct { _ struct{} `type:"structure"` @@ -4429,6 +4953,11 @@ func (s StreamingDistributionList) GoString() string { type StreamingDistributionSummary struct { _ struct{} `type:"structure"` + // The ARN (Amazon Resource Name) for the streaming distribution. For example: + // arn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5, + // where 123456789012 is your AWS account Id. + ARN *string `type:"string" required:"true"` + // A complex type that contains information about CNAMEs (alternate domain names), // if any, for this streaming distribution. Aliases *Aliases `type:"structure" required:"true"` @@ -4535,6 +5064,159 @@ func (s *StreamingLoggingConfig) Validate() error { return nil } +// A complex type that contains Tag key and Tag value. +type Tag struct { + _ struct{} `type:"structure"` + + // A string that contains Tag key. The string length should be between 1 and + // 128 characters. Valid characters include a-z, A-Z, 0-9, space, and the special + // characters _ - . : / = + @. + Key *string `min:"1" type:"string" required:"true"` + + // A string that contains an optional Tag value. The string length should be + // between 0 and 256 characters. Valid characters include a-z, A-Z, 0-9, space, + // and the special characters _ - . : / = + @. + Value *string `type:"string"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// A complex type that contains zero or more Tag elements. +type TagKeys struct { + _ struct{} `type:"structure"` + + // A complex type that contains Tag key elements + Items []*string `locationNameList:"Key" type:"list"` +} + +// String returns the string representation +func (s TagKeys) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagKeys) GoString() string { + return s.String() +} + +// The request to add tags to a CloudFront resource. +type TagResourceInput struct { + _ struct{} `type:"structure" payload:"Tags"` + + // An ARN of a CloudFront resource. 
+ Resource *string `location:"querystring" locationName:"Resource" type:"string" required:"true"` + + // A complex type that contains zero or more Tag elements. + Tags *Tags `locationName:"Tags" type:"structure" required:"true"` +} + +// String returns the string representation +func (s TagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.Tags == nil { + invalidParams.Add(request.NewErrParamRequired("Tags")) + } + if s.Tags != nil { + if err := s.Tags.Validate(); err != nil { + invalidParams.AddNested("Tags", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type TagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s TagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TagResourceOutput) GoString() string { + return s.String() +} + +// A complex type that contains zero or more Tag elements. +type Tags struct { + _ struct{} `type:"structure"` + + // A complex type that contains Tag elements + Items []*Tag `locationNameList:"Tag" type:"list"` +} + +// String returns the string representation +func (s Tags) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tags) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tags) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tags"} + if s.Items != nil { + for i, v := range s.Items { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Items", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // A complex type that specifies the AWS accounts, if any, that you want to // allow to create signed URLs for private content. If you want to require signed // URLs in requests for objects in the target origin that match the PathPattern @@ -4587,6 +5269,57 @@ func (s *TrustedSigners) Validate() error { return nil } +// The request to remove tags from a CloudFront resource. +type UntagResourceInput struct { + _ struct{} `type:"structure" payload:"TagKeys"` + + // An ARN of a CloudFront resource. + Resource *string `location:"querystring" locationName:"Resource" type:"string" required:"true"` + + // A complex type that contains zero or more Tag key elements. + TagKeys *TagKeys `locationName:"TagKeys" type:"structure" required:"true"` +} + +// String returns the string representation +func (s UntagResourceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
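[Editor's note: the Tag validation above only enforces that Key is present and at least one character long; Value is optional. A small sketch of how a malformed item is reported; the helper name is hypothetical.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudfront"
)

// validateTags is a hypothetical helper illustrating Tags.Validate: the
// empty Key on the second item fails the min-length check and is reported
// with a nested "Items[1]" context.
func validateTags() error {
	tags := &cloudfront.Tags{
		Items: []*cloudfront.Tag{
			{Key: aws.String("Team"), Value: aws.String("web")},
			{Key: aws.String("")}, // fails the minimum key length of 1
		},
	}
	return tags.Validate()
}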
+func (s *UntagResourceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UntagResourceInput"} + if s.Resource == nil { + invalidParams.Add(request.NewErrParamRequired("Resource")) + } + if s.TagKeys == nil { + invalidParams.Add(request.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type UntagResourceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagResourceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UntagResourceOutput) GoString() string { + return s.String() +} + // The request to update an origin access identity. type UpdateCloudFrontOriginAccessIdentityInput struct { _ struct{} `type:"structure" payload:"CloudFrontOriginAccessIdentityConfig"` diff --git a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go index 99d23c72e..d3ef6b076 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/cloudfront/service.go @@ -11,7 +11,11 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/restxml" ) -// CloudFront is a client for CloudFront. +// Amazon CloudFront is a global content delivery network (CDN) service that +// accelerates delivery of your websites, APIs, video content or other web assets. +// It integrates with other Amazon Web Services products to give developers +// and businesses an easy way to accelerate content to end users with no minimum +// usage commitments. //The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. type CloudFront struct { @@ -51,7 +55,7 @@ func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio ServiceName: ServiceName, SigningRegion: signingRegion, Endpoint: endpoint, - APIVersion: "2016-01-28", + APIVersion: "2016-08-01", }, handlers, ), diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go index 9e94fe671..36181d991 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/customizations.go @@ -40,7 +40,11 @@ func fillPresignedURL(r *request.Request) { clientInfo := r.ClientInfo clientInfo.Endpoint, clientInfo.SigningRegion = endpoints.EndpointForRegion( - clientInfo.ServiceName, aws.StringValue(cfg.Region), aws.BoolValue(cfg.DisableSSL)) + clientInfo.ServiceName, + aws.StringValue(cfg.Region), + aws.BoolValue(cfg.DisableSSL), + aws.BoolValue(cfg.UseDualStack), + ) // Presign a CopySnapshot request with modified params req := request.New(*cfg, clientInfo, r.Handlers, r.Retryer, r.Operation, newParams, r.Data) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go index aa162c0be..b6ffa5109 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecr/api.go @@ -1159,7 +1159,7 @@ func (s *CreateRepositoryInput) Validate() error { type CreateRepositoryOutput struct { _ struct{} `type:"structure"` - // Object representing a repository. + // An object representing a repository. 
Repository *Repository `locationName:"repository" type:"structure"` } @@ -1216,7 +1216,7 @@ func (s *DeleteRepositoryInput) Validate() error { type DeleteRepositoryOutput struct { _ struct{} `type:"structure"` - // Object representing a repository. + // An object representing a repository. Repository *Repository `locationName:"repository" type:"structure"` } @@ -1309,6 +1309,9 @@ type DescribeRepositoriesInput struct { // parameter. Pagination continues from the end of the previous results that // returned the nextToken value. This value is null when there are no more results // to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repositories @@ -1543,7 +1546,7 @@ func (s GetRepositoryPolicyOutput) GoString() string { return s.String() } -// Object representing an image. +// An object representing an Amazon ECR image. type Image struct { _ struct{} `type:"structure"` @@ -1570,6 +1573,7 @@ func (s Image) GoString() string { return s.String() } +// An object representing an Amazon ECR image failure. type ImageFailure struct { _ struct{} `type:"structure"` @@ -1593,6 +1597,7 @@ func (s ImageFailure) GoString() string { return s.String() } +// An object with identifying information for an Amazon ECR image. type ImageIdentifier struct { _ struct{} `type:"structure"` @@ -1672,6 +1677,7 @@ func (s InitiateLayerUploadOutput) GoString() string { return s.String() } +// An object representing an Amazon ECR image layer. type Layer struct { _ struct{} `type:"structure"` @@ -1696,6 +1702,7 @@ func (s Layer) GoString() string { return s.String() } +// An object representing an Amazon ECR image layer failure. type LayerFailure struct { _ struct{} `type:"structure"` @@ -1719,9 +1726,27 @@ func (s LayerFailure) GoString() string { return s.String() } +type ListImagesFilter struct { + _ struct{} `type:"structure"` + + TagStatus *string `locationName:"tagStatus" type:"string" enum:"TagStatus"` +} + +// String returns the string representation +func (s ListImagesFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListImagesFilter) GoString() string { + return s.String() +} + type ListImagesInput struct { _ struct{} `type:"structure"` + Filter *ListImagesFilter `locationName:"filter" type:"structure"` + // The maximum number of image results returned by ListImages in paginated output. // When this parameter is used, ListImages only returns maxResults results in // a single page along with a nextToken response element. The remaining results @@ -1735,6 +1760,9 @@ type ListImagesInput struct { // where maxResults was used and the results exceeded the value of that parameter. // Pagination continues from the end of the previous results that returned the // nextToken value. This value is null when there are no more results to return. + // + // This token should be treated as an opaque identifier that is only used + // to retrieve the next items in a list and not for other programmatic purposes. NextToken *string `locationName:"nextToken" type:"string"` // The AWS account ID associated with the registry that contains the repository @@ -1859,7 +1887,7 @@ func (s PutImageOutput) GoString() string { return s.String() } -// Object representing a repository. +// An object representing a repository. 
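[Editor's note: a sketch of how the new ListImagesFilter and the TagStatus enum (declared a little further down in this hunk) might be used to page through only untagged images, treating nextToken as the opaque cursor the updated documentation calls for. The client, repository name, and helper name are placeholders.]

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecr"
)

// listUntaggedImages is a hypothetical helper that filters ListImages to
// untagged images and follows NextToken until the listing is exhausted.
func listUntaggedImages(svc *ecr.ECR, repo string) error {
	input := &ecr.ListImagesInput{
		RepositoryName: aws.String(repo),
		Filter: &ecr.ListImagesFilter{
			TagStatus: aws.String(ecr.TagStatusUntagged),
		},
	}
	for {
		out, err := svc.ListImages(input)
		if err != nil {
			return err
		}
		for _, id := range out.ImageIds {
			fmt.Println(aws.StringValue(id.ImageDigest))
		}
		if out.NextToken == nil {
			return nil
		}
		input.NextToken = out.NextToken
	}
}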
type Repository struct { _ struct{} `type:"structure"` @@ -2077,3 +2105,10 @@ const ( // @enum LayerFailureCode LayerFailureCodeMissingLayerDigest = "MissingLayerDigest" ) + +const ( + // @enum TagStatus + TagStatusTagged = "TAGGED" + // @enum TagStatus + TagStatusUntagged = "UNTAGGED" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go index 21bf3c381..c3fea11fb 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ecs/api.go @@ -110,6 +110,8 @@ func (c *ECS) CreateServiceRequest(input *CreateServiceInput) (req *request.Requ // In addition to maintaining the desired count of tasks in your service, you // can optionally run your service behind a load balancer. The load balancer // distributes traffic across the tasks that are associated with the service. +// For more information, see Service Load Balancing (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html) +// in the Amazon EC2 Container Service Developer Guide. // // You can optionally specify a deployment configuration for your service. // During a deployment (which is triggered by changing the task definition of @@ -1989,7 +1991,7 @@ type ContainerDefinition struct { // containers, see https://docs.docker.com/userguide/dockerlinks/ (https://docs.docker.com/userguide/dockerlinks/). // This parameter maps to Links in the Create a container (https://docs.docker.com/reference/api/docker_remote_api_v1.19/#create-a-container) // section of the Docker Remote API (https://docs.docker.com/reference/api/docker_remote_api_v1.19/) - // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). + // and the --link option to docker run (https://docs.docker.com/reference/commandline/run/). // // Containers that are collocated on a single container instance may be able // to communicate with each other without requiring links or host port mappings. @@ -2299,9 +2301,23 @@ type CreateServiceInput struct { // keep running on your cluster. DesiredCount *int64 `locationName:"desiredCount" type:"integer" required:"true"` - // A list of load balancer objects, containing the load balancer name, the container - // name (as it appears in a container definition), and the container port to - // access from the load balancer. + // A load balancer object representing the load balancer to use with your service. + // Currently, you are limited to one load balancer per service. After you create + // a service, the load balancer name, container name, and container port specified + // in the service definition are immutable. + // + // For Elastic Load Balancing Classic load balancers, this object must contain + // the load balancer name, the container name (as it appears in a container + // definition), and the container port to access from the load balancer. When + // a task from this service is placed on a container instance, the container + // instance is registered with the load balancer specified here. + // + // For Elastic Load Balancing Application load balancers, this object must + // contain the load balancer target group ARN, the container name (as it appears + // in a container definition), and the container port to access from the load + // balancer. When a task from this service is placed on a container instance, + // the container instance and port combination is registered as a target in + // the target group specified here. 
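[Editor's note: to make the new Application Load Balancer wiring in ECS concrete, here is a sketch of creating a service behind a target group via the TargetGroupArn field added in this hunk. The cluster, service, role, ARN, and port values are placeholders, and ContainerName/ContainerPort are the existing LoadBalancer members referred to in the comment above.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ecs"
)

// createServiceBehindTargetGroup is a hypothetical helper showing the new
// TargetGroupArn field used in place of a Classic load balancer name.
func createServiceBehindTargetGroup(svc *ecs.ECS) (*ecs.CreateServiceOutput, error) {
	return svc.CreateService(&ecs.CreateServiceInput{
		Cluster:        aws.String("default"),
		ServiceName:    aws.String("web"),
		TaskDefinition: aws.String("web:1"),
		DesiredCount:   aws.Int64(2),
		Role:           aws.String("ecsServiceRole"),
		LoadBalancers: []*ecs.LoadBalancer{{
			TargetGroupArn: aws.String("arn:aws:elasticloadbalancing:us-east-1:123456789012:targetgroup/web/73e2d6bc24d8a067"),
			ContainerName:  aws.String("web"),
			ContainerPort:  aws.Int64(80),
		}},
	})
}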
LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` // The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon @@ -2478,7 +2494,7 @@ func (s DeleteServiceOutput) GoString() string { type Deployment struct { _ struct{} `type:"structure"` - // The Unix time in seconds and milliseconds when the service was created. + // The Unix timestamp for when the service was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The most recent desired count of tasks that was specified for the service @@ -2503,7 +2519,7 @@ type Deployment struct { // The most recent task definition that was specified for the service to use. TaskDefinition *string `locationName:"taskDefinition" type:"string"` - // The Unix time in seconds and milliseconds when the service was last updated. + // The Unix timestamp for when the service was last updated. UpdatedAt *time.Time `locationName:"updatedAt" type:"timestamp" timestampFormat:"unix"` } @@ -3408,11 +3424,11 @@ type ListTasksInput struct { // limits the results to tasks that belong to that container instance. ContainerInstance *string `locationName:"containerInstance" type:"string"` - // The task status with which to filter the ListTasks results. Specifying a - // desiredStatus of STOPPED limits the results to tasks that are in the STOPPED - // status, which can be useful for debugging tasks that are not starting properly - // or have died or finished. The default status filter is status filter is RUNNING, - // which shows tasks that ECS has set the desired status to RUNNING. + // The task desired status with which to filter the ListTasks results. Specifying + // a desiredStatus of STOPPED limits the results to tasks that ECS has set the + // desired status to STOPPED, which can be useful for debugging tasks that are + // not starting properly or have died or finished. The default status filter + // is RUNNING, which shows tasks that ECS has set the desired status to RUNNING. // // Although you can filter results based on a desired status of PENDING, this // will not return any results because ECS never sets the desired status of @@ -3499,6 +3515,10 @@ type LoadBalancer struct { // The name of the load balancer. LoadBalancerName *string `locationName:"loadBalancerName" type:"string"` + + // The full Amazon Resource Name (ARN) of the Elastic Load Balancing target + // group associated with a service. + TargetGroupArn *string `locationName:"targetGroupArn" type:"string"` } // String returns the string representation @@ -3960,7 +3980,7 @@ type Service struct { // The Amazon Resource Name (ARN) of the cluster that hosts the service. ClusterArn *string `locationName:"clusterArn" type:"string"` - // The Unix time in seconds and milliseconds when the service was created. + // The Unix timestamp for when the service was created. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // Optional deployment parameters that control how many tasks run during the @@ -3979,9 +3999,9 @@ type Service struct { // are displayed. Events []*ServiceEvent `locationName:"events" type:"list"` - // A list of load balancer objects, containing the load balancer name, the container - // name (as it appears in a container definition), and the container port to - // access from the load balancer. 
+ // A list of Elastic Load Balancing load balancer objects, containing the load + // balancer name, the container name (as it appears in a container definition), + // and the container port to access from the load balancer. LoadBalancers []*LoadBalancer `locationName:"loadBalancers" type:"list"` // The number of tasks in the cluster that are in the PENDING state. @@ -3989,7 +4009,7 @@ type Service struct { // The Amazon Resource Name (ARN) of the IAM role associated with the service // that allows the Amazon ECS container agent to register container instances - // with a load balancer. + // with an Elastic Load Balancing load balancer. RoleArn *string `locationName:"roleArn" type:"string"` // The number of tasks in the cluster that are in the RUNNING state. @@ -4030,7 +4050,7 @@ func (s Service) GoString() string { type ServiceEvent struct { _ struct{} `type:"structure"` - // The Unix time in seconds and milliseconds when the event was triggered. + // The Unix timestamp for when the event was triggered. CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The ID string of the event. @@ -4310,8 +4330,8 @@ type Task struct { // The containers associated with the task. Containers []*Container `locationName:"containers" type:"list"` - // The Unix time in seconds and milliseconds when the task was created (the - // task entered the PENDING state). + // The Unix timestamp for when the task was created (the task entered the PENDING + // state). CreatedAt *time.Time `locationName:"createdAt" type:"timestamp" timestampFormat:"unix"` // The desired status of the task. @@ -4323,8 +4343,8 @@ type Task struct { // One or more container overrides. Overrides *TaskOverride `locationName:"overrides" type:"structure"` - // The Unix time in seconds and milliseconds when the task was started (the - // task transitioned from the PENDING state to the RUNNING state). + // The Unix timestamp for when the task was started (the task transitioned from + // the PENDING state to the RUNNING state). StartedAt *time.Time `locationName:"startedAt" type:"timestamp" timestampFormat:"unix"` // The tag specified when a task is started. If the task is started by an Amazon @@ -4332,8 +4352,8 @@ type Task struct { // service that starts it. StartedBy *string `locationName:"startedBy" type:"string"` - // The Unix time in seconds and milliseconds when the task was stopped (the - // task transitioned from the RUNNING state to the STOPPED state). + // The Unix timestamp for when the task was stopped (the task transitioned from + // the RUNNING state to the STOPPED state). StoppedAt *time.Time `locationName:"stoppedAt" type:"timestamp" timestampFormat:"unix"` // The reason the task was stopped. @@ -4362,7 +4382,7 @@ type TaskDefinition struct { // A list of container definitions in JSON format that describe the different // containers that make up your task. For more information about container definition - // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonECS/latest/developerguidetask_defintions.html) + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) // in the Amazon EC2 Container Service Developer Guide. 
ContainerDefinitions []*ContainerDefinition `locationName:"containerDefinitions" type:"list"` @@ -4391,7 +4411,7 @@ type TaskDefinition struct { TaskRoleArn *string `locationName:"taskRoleArn" type:"string"` // The list of volumes in a task. For more information about volume definition - // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/http:/docs.aws.amazon.com/AmazonECS/latest/developerguidetask_defintions.html) + // parameters and defaults, see Amazon ECS Task Definitions (http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html) // in the Amazon EC2 Container Service Developer Guide. Volumes []*Volume `locationName:"volumes" type:"list"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go index 46c912729..7c0f327d8 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/api.go @@ -58,8 +58,8 @@ func (c *ELB) AddTagsRequest(input *AddTagsInput) (req *request.Request, output // Each tag consists of a key and an optional value. If a tag with the same // key is already associated with the load balancer, AddTags updates its value. // -// For more information, see Tag Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/add-remove-tags.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Tag Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) +// in the Classic Load Balancers Guide. func (c *ELB) AddTags(input *AddTagsInput) (*AddTagsOutput, error) { req, out := c.AddTagsRequest(input) err := req.Send() @@ -111,8 +111,8 @@ func (c *ELB) ApplySecurityGroupsToLoadBalancerRequest(input *ApplySecurityGroup // private cloud (VPC). The specified security groups override the previously // associated security groups. // -// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-groups.html#elb-vpc-security-groups) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Security Groups for Load Balancers in a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-security-groups.html#elb-vpc-security-groups) +// in the Classic Load Balancers Guide. func (c *ELB) ApplySecurityGroupsToLoadBalancer(input *ApplySecurityGroupsToLoadBalancerInput) (*ApplySecurityGroupsToLoadBalancerOutput, error) { req, out := c.ApplySecurityGroupsToLoadBalancerRequest(input) err := req.Send() @@ -165,8 +165,8 @@ func (c *ELB) AttachLoadBalancerToSubnetsRequest(input *AttachLoadBalancerToSubn // // The load balancer evenly distributes requests across all registered subnets. // For more information, see Add or Remove Subnets for Your Load Balancer in -// a VPC (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-manage-subnets.html) -// in the Elastic Load Balancing Developer Guide. +// a VPC (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-manage-subnets.html) +// in the Classic Load Balancers Guide. 
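[Editor's note: a sketch of tagging a Classic load balancer with AddTags, the operation whose documentation link this hunk repoints at the Classic Load Balancers Guide. The load balancer name, tag values, and helper name are placeholders.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elb"
)

// tagClassicLoadBalancer is a hypothetical helper around AddTags; if a tag
// with the same key already exists, AddTags updates its value.
func tagClassicLoadBalancer(svc *elb.ELB, name string) error {
	_, err := svc.AddTags(&elb.AddTagsInput{
		LoadBalancerNames: []*string{aws.String(name)},
		Tags: []*elb.Tag{
			{Key: aws.String("Environment"), Value: aws.String("staging")},
		},
	})
	return err
}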
func (c *ELB) AttachLoadBalancerToSubnets(input *AttachLoadBalancerToSubnetsInput) (*AttachLoadBalancerToSubnetsOutput, error) { req, out := c.AttachLoadBalancerToSubnetsRequest(input) err := req.Send() @@ -215,10 +215,11 @@ func (c *ELB) ConfigureHealthCheckRequest(input *ConfigureHealthCheckInput) (req } // Specifies the health check settings to use when evaluating the health state -// of your back-end instances. +// of your EC2 instances. // -// For more information, see Configure Health Checks (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-healthchecks.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Configure Health Checks for Your Load Balancer +// (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-healthchecks.html) +// in the Classic Load Balancers Guide. func (c *ELB) ConfigureHealthCheck(input *ConfigureHealthCheckInput) (*ConfigureHealthCheckOutput, error) { req, out := c.ConfigureHealthCheckRequest(input) err := req.Send() @@ -279,8 +280,8 @@ func (c *ELB) CreateAppCookieStickinessPolicyRequest(input *CreateAppCookieStick // If the application cookie is explicitly removed or expires, the session // stops being sticky until a new application cookie is issued. // -// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-application) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// in the Classic Load Balancers Guide. func (c *ELB) CreateAppCookieStickinessPolicy(input *CreateAppCookieStickinessPolicyInput) (*CreateAppCookieStickinessPolicyOutput, error) { req, out := c.CreateAppCookieStickinessPolicyRequest(input) err := req.Send() @@ -333,18 +334,18 @@ func (c *ELB) CreateLBCookieStickinessPolicyRequest(input *CreateLBCookieStickin // This policy can be associated only with HTTP/HTTPS listeners. // // When a load balancer implements this policy, the load balancer uses a special -// cookie to track the back-end server instance for each request. When the load -// balancer receives a request, it first checks to see if this cookie is present -// in the request. If so, the load balancer sends the request to the application -// server specified in the cookie. If not, the load balancer sends the request -// to a server that is chosen based on the existing load-balancing algorithm. +// cookie to track the instance for each request. When the load balancer receives +// a request, it first checks to see if this cookie is present in the request. +// If so, the load balancer sends the request to the application server specified +// in the cookie. If not, the load balancer sends the request to a server that +// is chosen based on the existing load-balancing algorithm. // // A cookie is inserted into the response for binding subsequent requests from // the same user to that server. The validity of the cookie is based on the // cookie expiration time, which is specified in the policy configuration. // -// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-sticky-sessions.html#enable-sticky-sessions-duration) -// in the Elastic Load Balancing Developer Guide. 
+// For more information, see Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration) +// in the Classic Load Balancers Guide. func (c *ELB) CreateLBCookieStickinessPolicy(input *CreateLBCookieStickinessPolicyInput) (*CreateLBCookieStickinessPolicyOutput, error) { req, out := c.CreateLBCookieStickinessPolicyRequest(input) err := req.Send() @@ -392,18 +393,19 @@ func (c *ELB) CreateLoadBalancerRequest(input *CreateLoadBalancerInput) (req *re return } -// Creates a load balancer. +// Creates a Classic load balancer. // -// If the call completes successfully, a new load balancer is created with -// a unique Domain Name Service (DNS) name. The load balancer receives incoming -// traffic and routes it to the registered instances. For more information, -// see How Elastic Load Balancing Works (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/how-elb-works.html) -// in the Elastic Load Balancing Developer Guide. +// You can add listeners, security groups, subnets, and tags when you create +// your load balancer, or you can add them later using CreateLoadBalancerListeners, +// ApplySecurityGroupsToLoadBalancer, AttachLoadBalancerToSubnets, and AddTags. +// +// To describe your current load balancers, see DescribeLoadBalancers. When +// you are finished with a load balancer, you can delete it using DeleteLoadBalancer. // // You can create up to 20 load balancers per region per account. You can request // an increase for the number of load balancers for your account. For more information, -// see Elastic Load Balancing Limits (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-limits.html) -// in the Elastic Load Balancing Developer Guide. +// see Limits for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-limits.html) +// in the Classic Load Balancers Guide. func (c *ELB) CreateLoadBalancer(input *CreateLoadBalancerInput) (*CreateLoadBalancerOutput, error) { req, out := c.CreateLoadBalancerRequest(input) err := req.Send() @@ -456,8 +458,8 @@ func (c *ELB) CreateLoadBalancerListenersRequest(input *CreateLoadBalancerListen // the properties of the new listener must match the properties of the existing // listener. // -// For more information, see Add a Listener to Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/us-add-listener.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// in the Classic Load Balancers Guide. func (c *ELB) CreateLoadBalancerListeners(input *CreateLoadBalancerListenersInput) (*CreateLoadBalancerListenersOutput, error) { req, out := c.CreateLoadBalancerListenersRequest(input) err := req.Send() @@ -508,8 +510,8 @@ func (c *ELB) CreateLoadBalancerPolicyRequest(input *CreateLoadBalancerPolicyInp // Creates a policy with the specified attributes for the specified load balancer. // // Policies are settings that are saved for your load balancer and that can -// be applied to the front-end listener or the back-end application server, -// depending on the policy type. +// be applied to the listener or the application server, depending on the policy +// type. 
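[Editor's note: a sketch of the duration-based stickiness flow described above: create the policy, then bind it to a front-end port with SetLoadBalancerPoliciesOfListener. The policy name, port, and one-hour expiration are placeholders.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elb"
)

// enableDurationStickiness is a hypothetical helper: it creates a
// duration-based stickiness policy and attaches it to the HTTPS listener.
func enableDurationStickiness(svc *elb.ELB, lbName string) error {
	policy := "sticky-1h"
	if _, err := svc.CreateLBCookieStickinessPolicy(&elb.CreateLBCookieStickinessPolicyInput{
		LoadBalancerName:       aws.String(lbName),
		PolicyName:             aws.String(policy),
		CookieExpirationPeriod: aws.Int64(3600), // omit for browser-session lifetime
	}); err != nil {
		return err
	}
	_, err := svc.SetLoadBalancerPoliciesOfListener(&elb.SetLoadBalancerPoliciesOfListenerInput{
		LoadBalancerName: aws.String(lbName),
		LoadBalancerPort: aws.Int64(443),
		PolicyNames:      []*string{aws.String(policy)},
	})
	return err
}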
func (c *ELB) CreateLoadBalancerPolicy(input *CreateLoadBalancerPolicyInput) (*CreateLoadBalancerPolicyOutput, error) { req, out := c.CreateLoadBalancerPolicyRequest(input) err := req.Send() @@ -563,7 +565,7 @@ func (c *ELB) DeleteLoadBalancerRequest(input *DeleteLoadBalancerInput) (req *re // all settings. The DNS name associated with a deleted load balancer are no // longer usable. The name and associated DNS record of the deleted load balancer // no longer exist and traffic sent to any of its IP addresses is no longer -// delivered to back-end instances. +// delivered to your instances. // // If the load balancer does not exist or has already been deleted, the call // to DeleteLoadBalancer still succeeds. @@ -718,8 +720,8 @@ func (c *ELB) DeregisterInstancesFromLoadBalancerRequest(input *DeregisterInstan // You can use DescribeLoadBalancers to verify that the instance is deregistered // from the load balancer. // -// For more information, see Deregister and Register Amazon EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// in the Classic Load Balancers Guide. func (c *ELB) DeregisterInstancesFromLoadBalancer(input *DeregisterInstancesFromLoadBalancerInput) (*DeregisterInstancesFromLoadBalancerOutput, error) { req, out := c.DeregisterInstancesFromLoadBalancerRequest(input) err := req.Send() @@ -923,10 +925,18 @@ func (c *ELB) DescribeLoadBalancerPolicyTypesRequest(input *DescribeLoadBalancer return } -// Describes the specified load balancer policy types. +// Describes the specified load balancer policy types or all load balancer policy +// types. // -// You can use these policy types with CreateLoadBalancerPolicy to create policy -// configurations for a load balancer. +// The description of each type indicates how it can be used. For example, +// some policies can be used only with layer 7 listeners, some policies can +// be used only with layer 4 listeners, and some policies can be used only with +// your EC2 instances. +// +// You can use CreateLoadBalancerPolicy to create a policy configuration for +// any of these policy types. Then, depending on the policy type, use either +// SetLoadBalancerPoliciesOfListener or SetLoadBalancerPoliciesForBackendServer +// to set the policy. func (c *ELB) DescribeLoadBalancerPolicyTypes(input *DescribeLoadBalancerPolicyTypesInput) (*DescribeLoadBalancerPolicyTypesOutput, error) { req, out := c.DescribeLoadBalancerPolicyTypesRequest(input) err := req.Send() @@ -1164,9 +1174,8 @@ func (c *ELB) DisableAvailabilityZonesForLoadBalancerRequest(input *DisableAvail // the OutOfService state. Then, the load balancer attempts to equally balance // the traffic among its remaining Availability Zones. // -// For more information, see Disable an Availability Zone from a Load-Balanced -// Application (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_ShrinkLBApp04.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// in the Classic Load Balancers Guide. 
func (c *ELB) DisableAvailabilityZonesForLoadBalancer(input *DisableAvailabilityZonesForLoadBalancerInput) (*DisableAvailabilityZonesForLoadBalancerOutput, error) { req, out := c.DisableAvailabilityZonesForLoadBalancerRequest(input) err := req.Send() @@ -1220,8 +1229,8 @@ func (c *ELB) EnableAvailabilityZonesForLoadBalancerRequest(input *EnableAvailab // The load balancer evenly distributes requests across all its registered // Availability Zones that contain instances. // -// For more information, see Add Availability Zone (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_AddLBAvailabilityZone.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Add or Remove Availability Zones (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-az.html) +// in the Classic Load Balancers Guide. func (c *ELB) EnableAvailabilityZonesForLoadBalancer(input *EnableAvailabilityZonesForLoadBalancerInput) (*EnableAvailabilityZonesForLoadBalancerOutput, error) { req, out := c.EnableAvailabilityZonesForLoadBalancerRequest(input) err := req.Send() @@ -1276,13 +1285,15 @@ func (c *ELB) ModifyLoadBalancerAttributesRequest(input *ModifyLoadBalancerAttri // can modify the load balancer attribute ConnectionSettings by specifying an // idle connection timeout value for your load balancer. // -// For more information, see the following in the Elastic Load Balancing Developer -// Guide: +// For more information, see the following in the Classic Load Balancers Guide: // -// Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#request-routing) -// Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#conn-drain) -// Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/access-log-collection.html) -// Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#idle-timeout) +// Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) +// +// Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) +// +// Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/access-log-collection.html) +// +// Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) func (c *ELB) ModifyLoadBalancerAttributes(input *ModifyLoadBalancerAttributesInput) (*ModifyLoadBalancerAttributesOutput, error) { req, out := c.ModifyLoadBalancerAttributesRequest(input) err := req.Send() @@ -1349,15 +1360,10 @@ func (c *ELB) RegisterInstancesWithLoadBalancerRequest(input *RegisterInstancesW // If an Availability Zone is added to the load balancer later, any instances // registered with the load balancer move to the InService state. // -// If you stop an instance registered with a load balancer and then start it, -// the IP addresses associated with the instance changes. Elastic Load Balancing -// cannot recognize the new IP address, which prevents it from routing traffic -// to the instances. We recommend that you use the following sequence: stop -// the instance, deregister the instance, start the instance, and then register -// the instance. To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. 
+// To deregister instances from a load balancer, use DeregisterInstancesFromLoadBalancer. // -// For more information, see Deregister and Register EC2 Instances (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_DeReg_Reg_Instances.html) -// in the Elastic Load Balancing Developer Guide. +// For more information, see Register or De-Register EC2 Instances (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-deregister-register-instances.html) +// in the Classic Load Balancers Guide. func (c *ELB) RegisterInstancesWithLoadBalancer(input *RegisterInstancesWithLoadBalancerInput) (*RegisterInstancesWithLoadBalancerOutput, error) { req, out := c.RegisterInstancesWithLoadBalancerRequest(input) err := req.Send() @@ -1457,9 +1463,9 @@ func (c *ELB) SetLoadBalancerListenerSSLCertificateRequest(input *SetLoadBalance // The specified certificate replaces any prior certificate that was used on // the same load balancer and port. // -// For more information about updating your SSL certificate, see Updating an -// SSL Certificate for a Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/US_UpdatingLoadBalancerSSL.html) -// in the Elastic Load Balancing Developer Guide. +// For more information about updating your SSL certificate, see Replace the +// SSL Certificate for Your Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-update-ssl-cert.html) +// in the Classic Load Balancers Guide. func (c *ELB) SetLoadBalancerListenerSSLCertificate(input *SetLoadBalancerListenerSSLCertificateInput) (*SetLoadBalancerListenerSSLCertificateOutput, error) { req, out := c.SetLoadBalancerListenerSSLCertificateRequest(input) err := req.Send() @@ -1508,16 +1514,22 @@ func (c *ELB) SetLoadBalancerPoliciesForBackendServerRequest(input *SetLoadBalan } // Replaces the set of policies associated with the specified port on which -// the back-end server is listening with a new set of policies. At this time, -// only the back-end server authentication policy type can be applied to the -// back-end ports; this policy type is composed of multiple public key policies. +// the EC2 instance is listening with a new set of policies. At this time, only +// the back-end server authentication policy type can be applied to the instance +// ports; this policy type is composed of multiple public key policies. // // Each time you use SetLoadBalancerPoliciesForBackendServer to enable the // policies, use the PolicyNames parameter to list the policies that you want // to enable. // // You can use DescribeLoadBalancers or DescribeLoadBalancerPolicies to verify -// that the policy is associated with the back-end server. +// that the policy is associated with the EC2 instance. +// +// For more information about enabling back-end instance authentication, see +// Configure Back-end Instance Authentication (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-create-https-ssl-load-balancer.html#configure_backendauth_clt) +// in the Classic Load Balancers Guide. For more information about Proxy Protocol, +// see Configure Proxy Protocol Support (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-proxy-protocol.html) +// in the Classic Load Balancers Guide. 
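[Editor's note: the back-end policy flow mentioned above, sketched for Proxy Protocol: create a ProxyProtocolPolicyType policy, then bind it to the instance port with SetLoadBalancerPoliciesForBackendServer. The policy type and attribute names are the standard ELB ones, not something introduced by this diff, and the helper name is hypothetical.]

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/elb"
)

// enableProxyProtocol is a hypothetical helper for the flow described above.
func enableProxyProtocol(svc *elb.ELB, lbName string, instancePort int64) error {
	policy := "proxy-protocol-policy"
	if _, err := svc.CreateLoadBalancerPolicy(&elb.CreateLoadBalancerPolicyInput{
		LoadBalancerName: aws.String(lbName),
		PolicyName:       aws.String(policy),
		PolicyTypeName:   aws.String("ProxyProtocolPolicyType"),
		PolicyAttributes: []*elb.PolicyAttribute{
			{AttributeName: aws.String("ProxyProtocol"), AttributeValue: aws.String("true")},
		},
	}); err != nil {
		return err
	}
	_, err := svc.SetLoadBalancerPoliciesForBackendServer(&elb.SetLoadBalancerPoliciesForBackendServerInput{
		LoadBalancerName: aws.String(lbName),
		InstancePort:     aws.Int64(instancePort),
		PolicyNames:      []*string{aws.String(policy)},
	})
	return err
}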
func (c *ELB) SetLoadBalancerPoliciesForBackendServer(input *SetLoadBalancerPoliciesForBackendServerInput) (*SetLoadBalancerPoliciesForBackendServerOutput, error) { req, out := c.SetLoadBalancerPoliciesForBackendServerRequest(input) err := req.Send() @@ -1565,8 +1577,16 @@ func (c *ELB) SetLoadBalancerPoliciesOfListenerRequest(input *SetLoadBalancerPol return } -// Associates, updates, or disables a policy with a listener for the specified -// load balancer. You can associate multiple policies with a listener. +// Replaces the current set of policies for the specified load balancer port +// with the specified set of policies. +// +// To enable back-end server authentication, use SetLoadBalancerPoliciesForBackendServer. +// +// For more information about setting policies, see Update the SSL Negotiation +// Configuration (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/ssl-config-update.html), +// Duration-Based Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-duration), +// and Application-Controlled Session Stickiness (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-sticky-sessions.html#enable-sticky-sessions-application) +// in the Classic Load Balancers Guide. func (c *ELB) SetLoadBalancerPoliciesOfListener(input *SetLoadBalancerPoliciesOfListenerInput) (*SetLoadBalancerPoliciesOfListenerOutput, error) { req, out := c.SetLoadBalancerPoliciesOfListenerRequest(input) err := req.Send() @@ -1583,7 +1603,7 @@ type AccessLog struct { // Default: 60 minutes EmitInterval *int64 `type:"integer"` - // Specifies whether access log is enabled for the load balancer. + // Specifies whether access logs are enabled for the load balancer. Enabled *bool `type:"boolean" required:"true"` // The name of the Amazon S3 bucket where the access logs are stored. @@ -1618,6 +1638,7 @@ func (s *AccessLog) Validate() error { return nil } +// Contains the parameters for AddTags. type AddTagsInput struct { _ struct{} `type:"structure"` @@ -1667,6 +1688,7 @@ func (s *AddTagsInput) Validate() error { return nil } +// Contains the output of AddTags. type AddTagsOutput struct { _ struct{} `type:"structure"` } @@ -1724,6 +1746,7 @@ func (s AppCookieStickinessPolicy) GoString() string { return s.String() } +// Contains the parameters for ApplySecurityGroupsToLoadBalancer. type ApplySecurityGroupsToLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -1761,6 +1784,7 @@ func (s *ApplySecurityGroupsToLoadBalancerInput) Validate() error { return nil } +// Contains the output of ApplySecurityGroupsToLoadBalancer. type ApplySecurityGroupsToLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -1778,14 +1802,15 @@ func (s ApplySecurityGroupsToLoadBalancerOutput) GoString() string { return s.String() } +// Contains the parameters for AttachLoaBalancerToSubnets. type AttachLoadBalancerToSubnetsInput struct { _ struct{} `type:"structure"` // The name of the load balancer. LoadBalancerName *string `type:"string" required:"true"` - // The IDs of the subnets to add for the load balancer. You can add only one - // subnet per Availability Zone. + // The IDs of the subnets to add. You can add only one subnet per Availability + // Zone. Subnets []*string `type:"list" required:"true"` } @@ -1815,6 +1840,7 @@ func (s *AttachLoadBalancerToSubnetsInput) Validate() error { return nil } +// Contains the output of AttachLoadBalancerToSubnets. 
type AttachLoadBalancerToSubnetsOutput struct { _ struct{} `type:"structure"` @@ -1832,14 +1858,14 @@ func (s AttachLoadBalancerToSubnetsOutput) GoString() string { return s.String() } -// Information about the configuration of a back-end server. +// Information about the configuration of an EC2 instance. type BackendServerDescription struct { _ struct{} `type:"structure"` - // The port on which the back-end server is listening. + // The port on which the EC2 instance is listening. InstancePort *int64 `min:"1" type:"integer"` - // The names of the policies enabled for the back-end server. + // The names of the policies enabled for the EC2 instance. PolicyNames []*string `type:"list"` } @@ -1853,10 +1879,11 @@ func (s BackendServerDescription) GoString() string { return s.String() } +// Contains the parameters for ConfigureHealthCheck. type ConfigureHealthCheckInput struct { _ struct{} `type:"structure"` - // The configuration information for the new health check. + // The configuration information. HealthCheck *HealthCheck `type:"structure" required:"true"` // The name of the load balancer. @@ -1894,6 +1921,7 @@ func (s *ConfigureHealthCheckInput) Validate() error { return nil } +// Contains the output of ConfigureHealthCheck. type ConfigureHealthCheckOutput struct { _ struct{} `type:"structure"` @@ -1981,6 +2009,7 @@ func (s *ConnectionSettings) Validate() error { return nil } +// Contains the parameters for CreateAppCookieStickinessPolicy. type CreateAppCookieStickinessPolicyInput struct { _ struct{} `type:"structure"` @@ -2025,6 +2054,7 @@ func (s *CreateAppCookieStickinessPolicyInput) Validate() error { return nil } +// Contains the output for CreateAppCookieStickinessPolicy. type CreateAppCookieStickinessPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2039,12 +2069,14 @@ func (s CreateAppCookieStickinessPolicyOutput) GoString() string { return s.String() } +// Contains the parameters for CreateLBCookieStickinessPolicy. type CreateLBCookieStickinessPolicyInput struct { _ struct{} `type:"structure"` // The time period, in seconds, after which the cookie should be considered - // stale. If you do not specify this parameter, the sticky session lasts for - // the duration of the browser session. + // stale. If you do not specify this parameter, the default value is 0, which + // indicates that the sticky session should last for the duration of the browser + // session. CookieExpirationPeriod *int64 `type:"long"` // The name of the load balancer. @@ -2082,6 +2114,7 @@ func (s *CreateLBCookieStickinessPolicyInput) Validate() error { return nil } +// Contains the output for CreateLBCookieStickinessPolicy. type CreateLBCookieStickinessPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2096,11 +2129,11 @@ func (s CreateLBCookieStickinessPolicyOutput) GoString() string { return s.String() } +// Contains the parameters for CreateLoadBalancer. type CreateLoadBalancerInput struct { _ struct{} `type:"structure"` // One or more Availability Zones from the same region as the load balancer. - // Traffic is equally distributed across all specified Availability Zones. // // You must specify at least one Availability Zone. // @@ -2110,8 +2143,8 @@ type CreateLoadBalancerInput struct { // The listeners. // - // For more information, see Listeners for Your Load Balancer (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) - // in the Elastic Load Balancing Developer Guide. 
+ // For more information, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) + // in the Classic Load Balancers Guide. Listeners []*Listener `type:"list" required:"true"` // The name of the load balancer. @@ -2124,13 +2157,13 @@ type CreateLoadBalancerInput struct { // The type of a load balancer. Valid only for load balancers in a VPC. // // By default, Elastic Load Balancing creates an Internet-facing load balancer - // with a publicly resolvable DNS name, which resolves to public IP addresses. - // For more information about Internet-facing and Internal load balancers, see - // Internet-facing and Internal Load Balancers (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/vpc-loadbalancer-types.html) - // in the Elastic Load Balancing Developer Guide. + // with a DNS name that resolves to public IP addresses. For more information + // about Internet-facing and Internal load balancers, see Load Balancer Scheme + // (http://docs.aws.amazon.com/elasticloadbalancing/latest/userguide/how-elastic-load-balancing-works.html#load-balancer-scheme) + // in the Elastic Load Balancing User Guide. // - // Specify internal to create an internal load balancer with a DNS name that - // resolves to private IP addresses. + // Specify internal to create a load balancer with a DNS name that resolves + // to private IP addresses. Scheme *string `type:"string"` // The IDs of the security groups to assign to the load balancer. @@ -2142,8 +2175,9 @@ type CreateLoadBalancerInput struct { // A list of tags to assign to the load balancer. // - // For more information about tagging your load balancer, see Tagging (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/TerminologyandKeyConcepts.html#tagging-elb) - // in the Elastic Load Balancing Developer Guide. + // For more information about tagging your load balancer, see Tag Your Classic + // Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/add-remove-tags.html) + // in the Classic Load Balancers Guide. Tags []*Tag `min:"1" type:"list"` } @@ -2196,6 +2230,7 @@ func (s *CreateLoadBalancerInput) Validate() error { return nil } +// Contains the parameters for CreateLoadBalancerListeners. type CreateLoadBalancerListenersInput struct { _ struct{} `type:"structure"` @@ -2242,6 +2277,7 @@ func (s *CreateLoadBalancerListenersInput) Validate() error { return nil } +// Contains the parameters for CreateLoadBalancerListener. type CreateLoadBalancerListenersOutput struct { _ struct{} `type:"structure"` } @@ -2256,6 +2292,7 @@ func (s CreateLoadBalancerListenersOutput) GoString() string { return s.String() } +// Contains the output for CreateLoadBalancer. type CreateLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -2273,13 +2310,14 @@ func (s CreateLoadBalancerOutput) GoString() string { return s.String() } +// Contains the parameters for CreateLoadBalancerPolicy. type CreateLoadBalancerPolicyInput struct { _ struct{} `type:"structure"` // The name of the load balancer. LoadBalancerName *string `type:"string" required:"true"` - // The attributes for the policy. + // The policy attributes. PolicyAttributes []*PolicyAttribute `type:"list"` // The name of the load balancer policy to be created. This name must be unique @@ -2319,6 +2357,7 @@ func (s *CreateLoadBalancerPolicyInput) Validate() error { return nil } +// Contains the output of CreateLoadBalancerPolicy. 
type CreateLoadBalancerPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2364,6 +2403,7 @@ func (s *CrossZoneLoadBalancing) Validate() error { return nil } +// Contains the parameters for DeleteLoadBalancer. type DeleteLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -2394,6 +2434,7 @@ func (s *DeleteLoadBalancerInput) Validate() error { return nil } +// Contains the parameters for DeleteLoadBalancerListeners. type DeleteLoadBalancerListenersInput struct { _ struct{} `type:"structure"` @@ -2430,6 +2471,7 @@ func (s *DeleteLoadBalancerListenersInput) Validate() error { return nil } +// Contains the output of DeleteLoadBalancerListeners. type DeleteLoadBalancerListenersOutput struct { _ struct{} `type:"structure"` } @@ -2444,6 +2486,7 @@ func (s DeleteLoadBalancerListenersOutput) GoString() string { return s.String() } +// Contains the output of DeleteLoadBalancer. type DeleteLoadBalancerOutput struct { _ struct{} `type:"structure"` } @@ -2458,7 +2501,7 @@ func (s DeleteLoadBalancerOutput) GoString() string { return s.String() } -// = +// Contains the parameters for DeleteLoadBalancerPolicy. type DeleteLoadBalancerPolicyInput struct { _ struct{} `type:"structure"` @@ -2495,6 +2538,7 @@ func (s *DeleteLoadBalancerPolicyInput) Validate() error { return nil } +// Contains the output of DeleteLoadBalancerPolicy. type DeleteLoadBalancerPolicyOutput struct { _ struct{} `type:"structure"` } @@ -2509,6 +2553,7 @@ func (s DeleteLoadBalancerPolicyOutput) GoString() string { return s.String() } +// Contains the parameters for DeregisterInstancesFromLoadBalancer. type DeregisterInstancesFromLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -2545,6 +2590,7 @@ func (s *DeregisterInstancesFromLoadBalancerInput) Validate() error { return nil } +// Contains the output of DeregisterInstancesFromLoadBalancer. type DeregisterInstancesFromLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -2562,6 +2608,7 @@ func (s DeregisterInstancesFromLoadBalancerOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeInstanceHealth. type DescribeInstanceHealthInput struct { _ struct{} `type:"structure"` @@ -2595,6 +2642,7 @@ func (s *DescribeInstanceHealthInput) Validate() error { return nil } +// Contains the output for DescribeInstanceHealth. type DescribeInstanceHealthOutput struct { _ struct{} `type:"structure"` @@ -2612,6 +2660,7 @@ func (s DescribeInstanceHealthOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLoadBalancerAttributes. type DescribeLoadBalancerAttributesInput struct { _ struct{} `type:"structure"` @@ -2642,6 +2691,7 @@ func (s *DescribeLoadBalancerAttributesInput) Validate() error { return nil } +// Contains the output of DescribeLoadBalancerAttributes. type DescribeLoadBalancerAttributesOutput struct { _ struct{} `type:"structure"` @@ -2659,6 +2709,7 @@ func (s DescribeLoadBalancerAttributesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLoadBalancerPolicies. type DescribeLoadBalancerPoliciesInput struct { _ struct{} `type:"structure"` @@ -2679,6 +2730,7 @@ func (s DescribeLoadBalancerPoliciesInput) GoString() string { return s.String() } +// Contains the output of DescribeLoadBalancerPolicies. type DescribeLoadBalancerPoliciesOutput struct { _ struct{} `type:"structure"` @@ -2696,6 +2748,7 @@ func (s DescribeLoadBalancerPoliciesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLoadBalancerPolicyTypes. 
type DescribeLoadBalancerPolicyTypesInput struct { _ struct{} `type:"structure"` @@ -2714,6 +2767,7 @@ func (s DescribeLoadBalancerPolicyTypesInput) GoString() string { return s.String() } +// Contains the output of DescribeLoadBalancerPolicyTypes. type DescribeLoadBalancerPolicyTypesOutput struct { _ struct{} `type:"structure"` @@ -2731,6 +2785,7 @@ func (s DescribeLoadBalancerPolicyTypesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeLoadBalancers. type DescribeLoadBalancersInput struct { _ struct{} `type:"structure"` @@ -2769,6 +2824,7 @@ func (s *DescribeLoadBalancersInput) Validate() error { return nil } +// Contains the parameters for DescribeLoadBalancers. type DescribeLoadBalancersOutput struct { _ struct{} `type:"structure"` @@ -2790,6 +2846,7 @@ func (s DescribeLoadBalancersOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeTags. type DescribeTagsInput struct { _ struct{} `type:"structure"` @@ -2823,6 +2880,7 @@ func (s *DescribeTagsInput) Validate() error { return nil } +// Contains the output for DescribeTags. type DescribeTagsOutput struct { _ struct{} `type:"structure"` @@ -2840,6 +2898,7 @@ func (s DescribeTagsOutput) GoString() string { return s.String() } +// Contains the parameters for DetachLoadBalancerFromSubnets. type DetachLoadBalancerFromSubnetsInput struct { _ struct{} `type:"structure"` @@ -2876,6 +2935,7 @@ func (s *DetachLoadBalancerFromSubnetsInput) Validate() error { return nil } +// Contains the output of DetachLoadBalancerFromSubnets. type DetachLoadBalancerFromSubnetsOutput struct { _ struct{} `type:"structure"` @@ -2893,6 +2953,7 @@ func (s DetachLoadBalancerFromSubnetsOutput) GoString() string { return s.String() } +// Contains the parameters for DisableAvailabilityZonesForLoadBalancer. type DisableAvailabilityZonesForLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -2929,6 +2990,7 @@ func (s *DisableAvailabilityZonesForLoadBalancerInput) Validate() error { return nil } +// Contains the output for DisableAvailabilityZonesForLoadBalancer. type DisableAvailabilityZonesForLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -2946,6 +3008,7 @@ func (s DisableAvailabilityZonesForLoadBalancerOutput) GoString() string { return s.String() } +// Contains the parameters for EnableAvailabilityZonesForLoadBalancer. type EnableAvailabilityZonesForLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -2982,6 +3045,7 @@ func (s *EnableAvailabilityZonesForLoadBalancerInput) Validate() error { return nil } +// Contains the output of EnableAvailabilityZonesForLoadBalancer. type EnableAvailabilityZonesForLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -3009,7 +3073,7 @@ type HealthCheck struct { // The approximate interval, in seconds, between health checks of an individual // instance. - Interval *int64 `min:"1" type:"integer" required:"true"` + Interval *int64 `min:"5" type:"integer" required:"true"` // The instance being checked. The protocol is either TCP, HTTP, HTTPS, or SSL. // The range of valid ports is one (1) through 65535. @@ -3035,7 +3099,7 @@ type HealthCheck struct { // check. // // This value must be less than the Interval value. - Timeout *int64 `min:"1" type:"integer" required:"true"` + Timeout *int64 `min:"2" type:"integer" required:"true"` // The number of consecutive health check failures required before moving the // instance to the Unhealthy state. 
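The HealthCheck changes in this hunk tighten the client-side minimums (Interval to 5 seconds, Timeout to 2 seconds). A minimal ConfigureHealthCheck sketch that satisfies the new validation, with a placeholder load balancer name and health check target, might look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	svc := elb.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// Interval must be at least 5 seconds and Timeout at least 2 seconds,
	// with Timeout less than Interval, to pass the SDK's Validate step.
	out, err := svc.ConfigureHealthCheck(&elb.ConfigureHealthCheckInput{
		LoadBalancerName: aws.String("my-classic-elb"), // placeholder
		HealthCheck: &elb.HealthCheck{
			Target:             aws.String("HTTP:80/health"),
			Interval:           aws.Int64(30),
			Timeout:            aws.Int64(5),
			HealthyThreshold:   aws.Int64(2),
			UnhealthyThreshold: aws.Int64(3),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.HealthCheck)
}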
@@ -3064,8 +3128,8 @@ func (s *HealthCheck) Validate() error { if s.Interval == nil { invalidParams.Add(request.NewErrParamRequired("Interval")) } - if s.Interval != nil && *s.Interval < 1 { - invalidParams.Add(request.NewErrParamMinValue("Interval", 1)) + if s.Interval != nil && *s.Interval < 5 { + invalidParams.Add(request.NewErrParamMinValue("Interval", 5)) } if s.Target == nil { invalidParams.Add(request.NewErrParamRequired("Target")) @@ -3073,8 +3137,8 @@ func (s *HealthCheck) Validate() error { if s.Timeout == nil { invalidParams.Add(request.NewErrParamRequired("Timeout")) } - if s.Timeout != nil && *s.Timeout < 1 { - invalidParams.Add(request.NewErrParamMinValue("Timeout", 1)) + if s.Timeout != nil && *s.Timeout < 2 { + invalidParams.Add(request.NewErrParamMinValue("Timeout", 2)) } if s.UnhealthyThreshold == nil { invalidParams.Add(request.NewErrParamRequired("UnhealthyThreshold")) @@ -3089,11 +3153,11 @@ func (s *HealthCheck) Validate() error { return nil } -// The ID of a back-end instance. +// The ID of an EC2 instance. type Instance struct { _ struct{} `type:"structure"` - // The ID of the instance. + // The instance ID. InstanceId *string `type:"string"` } @@ -3107,39 +3171,39 @@ func (s Instance) GoString() string { return s.String() } -// Information about the state of a back-end instance. +// Information about the state of an EC2 instance. type InstanceState struct { _ struct{} `type:"structure"` // A description of the instance state. This string can contain one or more // of the following messages. // - // N/A + // N/A // - // A transient error occurred. Please try again later. + // A transient error occurred. Please try again later. // - // Instance has failed at least the UnhealthyThreshold number of health checks - // consecutively. - // - // Instance has not passed the configured HealthyThreshold number of health + // Instance has failed at least the UnhealthyThreshold number of health // checks consecutively. // - // Instance registration is still in progress. + // Instance has not passed the configured HealthyThreshold number of health + // checks consecutively. // - // Instance is in the EC2 Availability Zone for which LoadBalancer is not + // Instance registration is still in progress. + // + // Instance is in the EC2 Availability Zone for which LoadBalancer is not // configured to route traffic to. // - // Instance is not currently registered with the LoadBalancer. + // Instance is not currently registered with the LoadBalancer. // - // Instance deregistration currently in progress. + // Instance deregistration currently in progress. // - // Disable Availability Zone is currently in progress. + // Disable Availability Zone is currently in progress. // - // Instance is in pending state. + // Instance is in pending state. // - // Instance is in stopped state. + // Instance is in stopped state. // - // Instance is in terminated state. + // Instance is in terminated state. Description *string `type:"string"` // The ID of the instance. @@ -3176,8 +3240,8 @@ type LBCookieStickinessPolicy struct { // the duration of the browser session. CookieExpirationPeriod *int64 `type:"long"` - // The name for the policy being created. The name must be unique within the - // set of policies for this load balancer. + // The name of the policy. This name must be unique within the set of policies + // for this load balancer. PolicyName *string `type:"string"` } @@ -3194,16 +3258,16 @@ func (s LBCookieStickinessPolicy) GoString() string { // Information about a listener. 
// // For information about the protocols and the ports supported by Elastic Load -// Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) -// in the Elastic Load Balancing Developer Guide. +// Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) +// in the Classic Load Balancers Guide. type Listener struct { _ struct{} `type:"structure"` // The port on which the instance is listening. InstancePort *int64 `min:"1" type:"integer" required:"true"` - // The protocol to use for routing traffic to back-end instances: HTTP, HTTPS, - // TCP, or SSL. + // The protocol to use for routing traffic to instances: HTTP, HTTPS, TCP, or + // SSL. // // If the front-end protocol is HTTP, HTTPS, TCP, or SSL, InstanceProtocol // must be at the same protocol. @@ -3267,8 +3331,8 @@ type ListenerDescription struct { // Information about a listener. // // For information about the protocols and the ports supported by Elastic Load - // Balancing, see Listener Configurations for Elastic Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-listener-config.html) - // in the Elastic Load Balancing Developer Guide. + // Balancing, see Listeners for Your Classic Load Balancer (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/elb-listener-config.html) + // in the Classic Load Balancers Guide. Listener *Listener `type:"structure"` // The policies. If there are no policies enabled, the list is empty. @@ -3292,19 +3356,18 @@ type LoadBalancerAttributes struct { // If enabled, the load balancer captures detailed information of all requests // and delivers the information to the Amazon S3 bucket that you specify. // - // For more information, see Enable Access Logs (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html) - // in the Elastic Load Balancing Developer Guide. + // For more information, see Enable Access Logs (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html) + // in the Classic Load Balancers Guide. AccessLog *AccessLog `type:"structure"` // This parameter is reserved. AdditionalAttributes []*AdditionalAttribute `type:"list"` // If enabled, the load balancer allows existing requests to complete before - // the load balancer shifts traffic away from a deregistered or unhealthy back-end - // instance. + // the load balancer shifts traffic away from a deregistered or unhealthy instance. // - // For more information, see Enable Connection Draining (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-conn-drain.html) - // in the Elastic Load Balancing Developer Guide. + // For more information, see Configure Connection Draining (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-conn-drain.html) + // in the Classic Load Balancers Guide. ConnectionDraining *ConnectionDraining `type:"structure"` // If enabled, the load balancer allows the connections to remain idle (no data @@ -3312,15 +3375,15 @@ type LoadBalancerAttributes struct { // // By default, Elastic Load Balancing maintains a 60-second idle connection // timeout for both front-end and back-end connections of your load balancer. 
- // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/config-idle-timeout.html) - // in the Elastic Load Balancing Developer Guide. + // For more information, see Configure Idle Connection Timeout (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/config-idle-timeout.html) + // in the Classic Load Balancers Guide. ConnectionSettings *ConnectionSettings `type:"structure"` // If enabled, the load balancer routes the request traffic evenly across all - // back-end instances regardless of the Availability Zones. + // instances regardless of the Availability Zones. // - // For more information, see Enable Cross-Zone Load Balancing (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-disable-crosszone-lb.html) - // in the Elastic Load Balancing Developer Guide. + // For more information, see Configure Cross-Zone Load Balancing (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-disable-crosszone-lb.html) + // in the Classic Load Balancers Guide. CrossZoneLoadBalancing *CrossZoneLoadBalancing `type:"structure"` } @@ -3371,23 +3434,22 @@ type LoadBalancerDescription struct { // The Availability Zones for the load balancer. AvailabilityZones []*string `type:"list"` - // Information about the back-end servers. + // Information about your EC2 instances. BackendServerDescriptions []*BackendServerDescription `type:"list"` - // The Amazon Route 53 hosted zone associated with the load balancer. + // The DNS name of the load balancer. // - // For more information, see Using Domain Names With Elastic Load Balancing - // (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/using-domain-names-with-elb.html) - // in the Elastic Load Balancing Developer Guide. + // For more information, see Configure a Custom Domain Name (http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/using-domain-names-with-elb.html) + // in the Classic Load Balancers Guide. CanonicalHostedZoneName *string `type:"string"` - // The ID of the Amazon Route 53 hosted zone name associated with the load balancer. + // The ID of the Amazon Route 53 hosted zone for the load balancer. CanonicalHostedZoneNameID *string `type:"string"` // The date and time the load balancer was created. CreatedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` - // The external DNS name of the load balancer. + // The DNS name of the load balancer. DNSName *string `type:"string"` // Information about the health checks conducted on the load balancer. @@ -3418,10 +3480,10 @@ type LoadBalancerDescription struct { // in a VPC. SecurityGroups []*string `type:"list"` - // The security group that you can use as part of your inbound rules for your - // load balancer's back-end application instances. To only allow traffic from - // load balancers, add a security group rule to your back end instance that - // specifies this source security group as the inbound source. + // The security group for the load balancer, which you can use as part of your + // inbound rules for your registered instances. To only allow traffic from load + // balancers, add a security group rule that specifies this source security + // group as the inbound source. SourceSecurityGroup *SourceSecurityGroup `type:"structure"` // The IDs of the subnets for the load balancer. @@ -3441,6 +3503,7 @@ func (s LoadBalancerDescription) GoString() string { return s.String() } +// Contains the parameters for ModifyLoadBalancerAttributes. 
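The LoadBalancerAttributes fields described above (access logs, connection draining, idle timeout, cross-zone load balancing) are applied through ModifyLoadBalancerAttributes. A minimal sketch, again with a placeholder load balancer name and illustrative timeout values, might be:

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	svc := elb.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// Enable connection draining, cross-zone load balancing, and a custom
	// idle timeout in a single ModifyLoadBalancerAttributes call.
	_, err := svc.ModifyLoadBalancerAttributes(&elb.ModifyLoadBalancerAttributesInput{
		LoadBalancerName: aws.String("my-classic-elb"), // placeholder
		LoadBalancerAttributes: &elb.LoadBalancerAttributes{
			ConnectionDraining: &elb.ConnectionDraining{
				Enabled: aws.Bool(true),
				Timeout: aws.Int64(300),
			},
			ConnectionSettings: &elb.ConnectionSettings{
				IdleTimeout: aws.Int64(120),
			},
			CrossZoneLoadBalancing: &elb.CrossZoneLoadBalancing{
				Enabled: aws.Bool(true),
			},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
}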
type ModifyLoadBalancerAttributesInput struct { _ struct{} `type:"structure"` @@ -3482,6 +3545,7 @@ func (s *ModifyLoadBalancerAttributesInput) Validate() error { return nil } +// Contains the output of ModifyLoadBalancerAttributes. type ModifyLoadBalancerAttributesOutput struct { _ struct{} `type:"structure"` @@ -3582,9 +3646,13 @@ type PolicyAttributeTypeDescription struct { // // Valid values: // - // ONE(1) : Single value required ZERO_OR_ONE(0..1) : Up to one value can - // be supplied ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed ONE_OR_MORE(1..*0) - // : Required. Multiple values are allowed + // ONE(1) : Single value required + // + // ZERO_OR_ONE(0..1) : Up to one value is allowed + // + // ZERO_OR_MORE(0..*) : Optional. Multiple values are allowed + // + // ONE_OR_MORE(1..*0) : Required. Multiple values are allowed Cardinality *string `type:"string"` // The default value of the attribute, if applicable. @@ -3653,6 +3721,7 @@ func (s PolicyTypeDescription) GoString() string { return s.String() } +// Contains the parameters for RegisterInstancesWithLoadBalancer. type RegisterInstancesWithLoadBalancerInput struct { _ struct{} `type:"structure"` @@ -3689,6 +3758,7 @@ func (s *RegisterInstancesWithLoadBalancerInput) Validate() error { return nil } +// Contains the output of RegisterInstancesWithLoadBalancer. type RegisterInstancesWithLoadBalancerOutput struct { _ struct{} `type:"structure"` @@ -3706,6 +3776,7 @@ func (s RegisterInstancesWithLoadBalancerOutput) GoString() string { return s.String() } +// Contains the parameters for RemoveTags. type RemoveTagsInput struct { _ struct{} `type:"structure"` @@ -3756,6 +3827,7 @@ func (s *RemoveTagsInput) Validate() error { return nil } +// Contains the output of RemoveTags. type RemoveTagsOutput struct { _ struct{} `type:"structure"` } @@ -3770,6 +3842,7 @@ func (s RemoveTagsOutput) GoString() string { return s.String() } +// Contains the parameters for SetLoadBalancerListenerSSLCertificate. type SetLoadBalancerListenerSSLCertificateInput struct { _ struct{} `type:"structure"` @@ -3812,6 +3885,7 @@ func (s *SetLoadBalancerListenerSSLCertificateInput) Validate() error { return nil } +// Contains the output of SetLoadBalancerListenerSSLCertificate. type SetLoadBalancerListenerSSLCertificateOutput struct { _ struct{} `type:"structure"` } @@ -3826,17 +3900,18 @@ func (s SetLoadBalancerListenerSSLCertificateOutput) GoString() string { return s.String() } +// Contains the parameters for SetLoadBalancerPoliciesForBackendServer. type SetLoadBalancerPoliciesForBackendServerInput struct { _ struct{} `type:"structure"` - // The port number associated with the back-end server. + // The port number associated with the EC2 instance. InstancePort *int64 `type:"integer" required:"true"` // The name of the load balancer. LoadBalancerName *string `type:"string" required:"true"` // The names of the policies. If the list is empty, then all current polices - // are removed from the back-end server. + // are removed from the EC2 instance. PolicyNames []*string `type:"list" required:"true"` } @@ -3869,6 +3944,7 @@ func (s *SetLoadBalancerPoliciesForBackendServerInput) Validate() error { return nil } +// Contains the output of SetLoadBalancerPoliciesForBackendServer. type SetLoadBalancerPoliciesForBackendServerOutput struct { _ struct{} `type:"structure"` } @@ -3883,17 +3959,19 @@ func (s SetLoadBalancerPoliciesForBackendServerOutput) GoString() string { return s.String() } +// Contains the parameters for SetLoadBalancePoliciesOfListener. 
type SetLoadBalancerPoliciesOfListenerInput struct { _ struct{} `type:"structure"` // The name of the load balancer. LoadBalancerName *string `type:"string" required:"true"` - // The external port of the load balancer for the policy. + // The external port of the load balancer. LoadBalancerPort *int64 `type:"integer" required:"true"` - // The names of the policies. If the list is empty, the current policy is removed - // from the listener. + // The names of the policies. This list must include all policies to be enabled. + // If you omit a policy that is currently enabled, it is disabled. If the list + // is empty, all current policies are disabled. PolicyNames []*string `type:"list" required:"true"` } @@ -3926,6 +4004,7 @@ func (s *SetLoadBalancerPoliciesOfListenerInput) Validate() error { return nil } +// Contains the output of SetLoadBalancePoliciesOfListener. type SetLoadBalancerPoliciesOfListenerOutput struct { _ struct{} `type:"structure"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go index 3dc5c73fb..57f3e1943 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/elb/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/elb/service.go @@ -11,15 +11,30 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/query" ) -// Elastic Load Balancing distributes incoming traffic across your EC2 instances. +// A load balancer distributes incoming traffic across your EC2 instances. This +// enables you to increase the availability of your application. The load balancer +// also monitors the health of its registered instances and ensures that it +// routes traffic only to healthy instances. You configure your load balancer +// to accept incoming traffic by specifying one or more listeners, which are +// configured with a protocol and port number for connections from clients to +// the load balancer and a protocol and port number for connections from the +// load balancer to the instances. // -// For information about the features of Elastic Load Balancing, see What Is -// Elastic Load Balancing? (http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elastic-load-balancing.html) -// in the Elastic Load Balancing Developer Guide. +// Elastic Load Balancing supports two types of load balancers: Classic load +// balancers and Application load balancers (new). A Classic load balancer makes +// routing and load balancing decisions either at the transport layer (TCP/SSL) +// or the application layer (HTTP/HTTPS), and supports either EC2-Classic or +// a VPC. An Application load balancer makes routing and load balancing decisions +// at the application layer (HTTP/HTTPS), supports path-based routing, and can +// route requests to one or more ports on each EC2 instance or container instance +// in your virtual private cloud (VPC). For more information, see the . // -// For information about the AWS regions supported by Elastic Load Balancing, -// see Regions and Endpoints - Elastic Load Balancing (http://docs.aws.amazon.com/general/latest/gr/rande.html#elb_region) -// in the Amazon Web Services General Reference. +// This reference covers the 2012-06-01 API, which supports Classic load balancers. +// The 2015-12-01 API supports Application load balancers. +// +// To get started, create a load balancer with one or more listeners using +// CreateLoadBalancer. Register your instances with the load balancer using +// RegisterInstancesWithLoadBalancer. 
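The rewritten package documentation above describes the getting-started flow: create a Classic load balancer with one or more listeners, then register instances with it. A minimal sketch of that flow, using a placeholder name, zone, and instance ID, could look like:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/elb"
)

func main() {
	svc := elb.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

	// Create a Classic load balancer with a single HTTP listener.
	created, err := svc.CreateLoadBalancer(&elb.CreateLoadBalancerInput{
		LoadBalancerName:  aws.String("my-classic-elb"), // placeholder
		AvailabilityZones: []*string{aws.String("us-east-1a")},
		Listeners: []*elb.Listener{{
			Protocol:         aws.String("HTTP"),
			LoadBalancerPort: aws.Int64(80),
			InstanceProtocol: aws.String("HTTP"),
			InstancePort:     aws.Int64(80),
		}},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("DNS name:", aws.StringValue(created.DNSName))

	// Register an instance with the new load balancer.
	if _, err := svc.RegisterInstancesWithLoadBalancer(&elb.RegisterInstancesWithLoadBalancerInput{
		LoadBalancerName: aws.String("my-classic-elb"),
		Instances:        []*elb.Instance{{InstanceId: aws.String("i-0123456789abcdef0")}},
	}); err != nil {
		log.Fatal(err)
	}
}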
// // All Elastic Load Balancing operations are idempotent, which means that they // complete at most one time. If you repeat an operation, it succeeds with a diff --git a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go index 18055b42d..447d2f886 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/kms/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/kms/api.go @@ -351,6 +351,65 @@ func (c *KMS) DeleteAlias(input *DeleteAliasInput) (*DeleteAliasOutput, error) { return out, err } +const opDeleteImportedKeyMaterial = "DeleteImportedKeyMaterial" + +// DeleteImportedKeyMaterialRequest generates a "aws/request.Request" representing the +// client's request for the DeleteImportedKeyMaterial operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteImportedKeyMaterial method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteImportedKeyMaterialRequest method. +// req, resp := client.DeleteImportedKeyMaterialRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) DeleteImportedKeyMaterialRequest(input *DeleteImportedKeyMaterialInput) (req *request.Request, output *DeleteImportedKeyMaterialOutput) { + op := &request.Operation{ + Name: opDeleteImportedKeyMaterial, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteImportedKeyMaterialInput{} + } + + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + output = &DeleteImportedKeyMaterialOutput{} + req.Data = output + return +} + +// Deletes key material that you previously imported and makes the specified +// customer master key (CMK) unusable. For more information about importing +// key material into AWS KMS, see Importing Key Material (http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the AWS Key Management Service Developer Guide. +// +// When the specified CMK is in the PendingDeletion state, this operation does +// not change the CMK's state. Otherwise, it changes the CMK's state to PendingImport. +// +// After you delete key material, you can use ImportKeyMaterial to reimport +// the same key material into the CMK. +func (c *KMS) DeleteImportedKeyMaterial(input *DeleteImportedKeyMaterialInput) (*DeleteImportedKeyMaterialOutput, error) { + req, out := c.DeleteImportedKeyMaterialRequest(input) + err := req.Send() + return out, err +} + const opDescribeKey = "DescribeKey" // DescribeKeyRequest generates a "aws/request.Request" representing the @@ -946,6 +1005,138 @@ func (c *KMS) GetKeyRotationStatus(input *GetKeyRotationStatusInput) (*GetKeyRot return out, err } +const opGetParametersForImport = "GetParametersForImport" + +// GetParametersForImportRequest generates a "aws/request.Request" representing the +// client's request for the GetParametersForImport operation. 
The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetParametersForImport method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetParametersForImportRequest method. +// req, resp := client.GetParametersForImportRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) GetParametersForImportRequest(input *GetParametersForImportInput) (req *request.Request, output *GetParametersForImportOutput) { + op := &request.Operation{ + Name: opGetParametersForImport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetParametersForImportInput{} + } + + req = c.newRequest(op, input, output) + output = &GetParametersForImportOutput{} + req.Data = output + return +} + +// Returns the items you need in order to import key material into AWS KMS from +// your existing key management infrastructure. For more information about importing +// key material into AWS KMS, see Importing Key Material (http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the AWS Key Management Service Developer Guide. +// +// You must specify the key ID of the customer master key (CMK) into which +// you will import key material. This CMK's Origin must be EXTERNAL. You must +// also specify the wrapping algorithm and type of wrapping key (public key) +// that you will use to encrypt the key material. +// +// This operation returns a public key and an import token. Use the public +// key to encrypt the key material. Store the import token to send with a subsequent +// ImportKeyMaterial request. The public key and import token from the same +// response must be used together. These items are valid for 24 hours, after +// which they cannot be used for a subsequent ImportKeyMaterial request. To +// retrieve new ones, send another GetParametersForImport request. +func (c *KMS) GetParametersForImport(input *GetParametersForImportInput) (*GetParametersForImportOutput, error) { + req, out := c.GetParametersForImportRequest(input) + err := req.Send() + return out, err +} + +const opImportKeyMaterial = "ImportKeyMaterial" + +// ImportKeyMaterialRequest generates a "aws/request.Request" representing the +// client's request for the ImportKeyMaterial operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ImportKeyMaterial method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ImportKeyMaterialRequest method. 
+// req, resp := client.ImportKeyMaterialRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +func (c *KMS) ImportKeyMaterialRequest(input *ImportKeyMaterialInput) (req *request.Request, output *ImportKeyMaterialOutput) { + op := &request.Operation{ + Name: opImportKeyMaterial, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ImportKeyMaterialInput{} + } + + req = c.newRequest(op, input, output) + output = &ImportKeyMaterialOutput{} + req.Data = output + return +} + +// Imports key material into an AWS KMS customer master key (CMK) from your +// existing key management infrastructure. For more information about importing +// key material into AWS KMS, see Importing Key Material (http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) +// in the AWS Key Management Service Developer Guide. +// +// You must specify the key ID of the CMK to import the key material into. +// This CMK's Origin must be EXTERNAL. You must also send an import token and +// the encrypted key material. Send the import token that you received in the +// same GetParametersForImport response that contained the public key that you +// used to encrypt the key material. You must also specify whether the key material +// expires and if so, when. When the key material expires, AWS KMS deletes the +// key material and the CMK becomes unusable. To use the CMK again, you can +// reimport the same key material. If you set an expiration date, you can change +// it only by reimporting the same key material and specifying a new expiration +// date. +// +// When this operation is successful, the specified CMK's key state changes +// to Enabled, and you can use the CMK. +// +// After you successfully import key material into a CMK, you can reimport +// the same key material into that CMK, but you cannot import different key +// material. +func (c *KMS) ImportKeyMaterial(input *ImportKeyMaterialInput) (*ImportKeyMaterialOutput, error) { + req, out := c.ImportKeyMaterialRequest(input) + err := req.Send() + return out, err +} + const opListAliases = "ListAliases" // ListAliasesRequest generates a "aws/request.Request" representing the @@ -2043,9 +2234,22 @@ type CreateKeyInput struct { // You can use CMKs only for symmetric encryption and decryption. KeyUsage *string `type:"string" enum:"KeyUsageType"` + // The source of the CMK's key material. + // + // The default is AWS_KMS, which means AWS KMS creates the key material. When + // this parameter is set to EXTERNAL, the request creates a CMK without key + // material so that you can import key material from your existing key management + // infrastructure. For more information about importing key material into AWS + // KMS, see Importing Key Material (http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys.html) + // in the AWS Key Management Service Developer Guide. + // + // The CMK's Origin is immutable and is set when the CMK is created. + Origin *string `type:"string" enum:"OriginType"` + // The key policy to attach to the CMK. // - // If you specify a key policy, it must meet the following criteria: + // If you specify a policy and do not set BypassPolicyLockoutSafetyCheck to + // true, the policy must meet the following criteria: // // It must allow the principal making the CreateKey request to make a subsequent // PutKeyPolicy request on the CMK. 
This reduces the likelihood that the CMK @@ -2227,6 +2431,61 @@ func (s DeleteAliasOutput) GoString() string { return s.String() } +type DeleteImportedKeyMaterialInput struct { + _ struct{} `type:"structure"` + + // The identifier of the CMK whose key material to delete. The CMK's Origin + // must be EXTERNAL. + // + // A valid identifier is the unique key ID or the Amazon Resource Name (ARN) + // of the CMK. Examples: + // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteImportedKeyMaterialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImportedKeyMaterialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteImportedKeyMaterialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteImportedKeyMaterialInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type DeleteImportedKeyMaterialOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteImportedKeyMaterialOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteImportedKeyMaterialOutput) GoString() string { + return s.String() +} + type DescribeKeyInput struct { _ struct{} `type:"structure"` @@ -2955,6 +3214,97 @@ func (s GetKeyRotationStatusOutput) GoString() string { return s.String() } +type GetParametersForImportInput struct { + _ struct{} `type:"structure"` + + // The identifier of the CMK into which you will import key material. The CMK's + // Origin must be EXTERNAL. + // + // A valid identifier is the unique key ID or the Amazon Resource Name (ARN) + // of the CMK. Examples: + // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string `min:"1" type:"string" required:"true"` + + // The algorithm you will use to encrypt the key material before importing it + // with ImportKeyMaterial. For more information, see Encrypt the Key Material + // (http://docs.aws.amazon.com/kms/latest/developerguide/importing-keys-encrypt-key-material.html) + // in the AWS Key Management Service Developer Guide. + WrappingAlgorithm *string `type:"string" required:"true" enum:"AlgorithmSpec"` + + // The type of wrapping key (public key) to return in the response. Only 2048-bit + // RSA public keys are supported. + WrappingKeySpec *string `type:"string" required:"true" enum:"WrappingKeySpec"` +} + +// String returns the string representation +func (s GetParametersForImportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersForImportInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetParametersForImportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetParametersForImportInput"} + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.WrappingAlgorithm == nil { + invalidParams.Add(request.NewErrParamRequired("WrappingAlgorithm")) + } + if s.WrappingKeySpec == nil { + invalidParams.Add(request.NewErrParamRequired("WrappingKeySpec")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type GetParametersForImportOutput struct { + _ struct{} `type:"structure"` + + // The import token to send in a subsequent ImportKeyMaterial request. + // + // ImportToken is automatically base64 encoded/decoded by the SDK. + ImportToken []byte `min:"1" type:"blob"` + + // The identifier of the CMK to use in a subsequent ImportKeyMaterial request. + // This is the same CMK specified in the GetParametersForImport request. + KeyId *string `min:"1" type:"string"` + + // The time at which the import token and public key are no longer valid. After + // this time, you cannot use them to make an ImportKeyMaterial request and you + // must send another GetParametersForImport request to retrieve new ones. + ParametersValidTo *time.Time `type:"timestamp" timestampFormat:"unix"` + + // The public key to use to encrypt the key material before importing it with + // ImportKeyMaterial. + // + // PublicKey is automatically base64 encoded/decoded by the SDK. + PublicKey []byte `min:"1" type:"blob"` +} + +// String returns the string representation +func (s GetParametersForImportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetParametersForImportOutput) GoString() string { + return s.String() +} + // A structure for specifying the conditions under which the operations permitted // by the grant are allowed. // @@ -3034,6 +3384,101 @@ func (s GrantListEntry) GoString() string { return s.String() } +type ImportKeyMaterialInput struct { + _ struct{} `type:"structure"` + + // The encrypted key material to import. It must be encrypted with the public + // key that you received in the response to a previous GetParametersForImport + // request, using the wrapping algorithm that you specified in that request. + // + // EncryptedKeyMaterial is automatically base64 encoded/decoded by the SDK. + EncryptedKeyMaterial []byte `min:"1" type:"blob" required:"true"` + + // Specifies whether the key material expires. The default is KEY_MATERIAL_EXPIRES, + // in which case you must include the ValidTo parameter. When this parameter + // is set to KEY_MATERIAL_DOES_NOT_EXPIRE, you must omit the ValidTo parameter. + ExpirationModel *string `type:"string" enum:"ExpirationModelType"` + + // The import token that you received in the response to a previous GetParametersForImport + // request. It must be from the same response that contained the public key + // that you used to encrypt the key material. + // + // ImportToken is automatically base64 encoded/decoded by the SDK. + ImportToken []byte `min:"1" type:"blob" required:"true"` + + // The identifier of the CMK to import the key material into. The CMK's Origin + // must be EXTERNAL. + // + // A valid identifier is the unique key ID or the Amazon Resource Name (ARN) + // of the CMK. 
Examples: + // + // Unique key ID: 1234abcd-12ab-34cd-56ef-1234567890ab + // + // Key ARN: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab + KeyId *string `min:"1" type:"string" required:"true"` + + // The time at which the imported key material expires. When the key material + // expires, AWS KMS deletes the key material and the CMK becomes unusable. You + // must omit this parameter when the ExpirationModel parameter is set to KEY_MATERIAL_DOES_NOT_EXPIRE. + // Otherwise it is required. + ValidTo *time.Time `type:"timestamp" timestampFormat:"unix" required:"true"` +} + +// String returns the string representation +func (s ImportKeyMaterialInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyMaterialInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ImportKeyMaterialInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ImportKeyMaterialInput"} + if s.EncryptedKeyMaterial == nil { + invalidParams.Add(request.NewErrParamRequired("EncryptedKeyMaterial")) + } + if s.EncryptedKeyMaterial != nil && len(s.EncryptedKeyMaterial) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncryptedKeyMaterial", 1)) + } + if s.ImportToken == nil { + invalidParams.Add(request.NewErrParamRequired("ImportToken")) + } + if s.ImportToken != nil && len(s.ImportToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ImportToken", 1)) + } + if s.KeyId == nil { + invalidParams.Add(request.NewErrParamRequired("KeyId")) + } + if s.KeyId != nil && len(*s.KeyId) < 1 { + invalidParams.Add(request.NewErrParamMinLen("KeyId", 1)) + } + if s.ValidTo == nil { + invalidParams.Add(request.NewErrParamRequired("ValidTo")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +type ImportKeyMaterialOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ImportKeyMaterialOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ImportKeyMaterialOutput) GoString() string { + return s.String() +} + // Contains information about each entry in the key list. type KeyListEntry struct { _ struct{} `type:"structure"` @@ -3062,43 +3507,58 @@ func (s KeyListEntry) GoString() string { type KeyMetadata struct { _ struct{} `type:"structure"` - // The twelve-digit account ID of the AWS account that owns the key. + // The twelve-digit account ID of the AWS account that owns the CMK. AWSAccountId *string `type:"string"` - // The Amazon Resource Name (ARN) of the key. For examples, see AWS Key Management + // The Amazon Resource Name (ARN) of the CMK. For examples, see AWS Key Management // Service (AWS KMS) (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arn-syntax-kms) // in the Example ARNs section of the AWS General Reference. Arn *string `min:"20" type:"string"` - // The date and time when the key was created. + // The date and time when the CMK was created. CreationDate *time.Time `type:"timestamp" timestampFormat:"unix"` - // The date and time after which AWS KMS deletes the customer master key (CMK). - // This value is present only when KeyState is PendingDeletion, otherwise this - // value is null. + // The date and time after which AWS KMS deletes the CMK. This value is present + // only when KeyState is PendingDeletion, otherwise this value is omitted. 
DeletionDate *time.Time `type:"timestamp" timestampFormat:"unix"` - // The friendly description of the key. + // The description of the CMK. Description *string `type:"string"` - // Specifies whether the key is enabled. When KeyState is Enabled this value + // Specifies whether the CMK is enabled. When KeyState is Enabled this value // is true, otherwise it is false. Enabled *bool `type:"boolean"` - // The globally unique identifier for the key. + // Specifies whether the CMK's key material expires. This value is present only + // when Origin is EXTERNAL, otherwise this value is omitted. + ExpirationModel *string `type:"string" enum:"ExpirationModelType"` + + // The globally unique identifier for the CMK. KeyId *string `min:"1" type:"string" required:"true"` - // The state of the customer master key (CMK). + // The state of the CMK. // // For more information about how key state affects the use of a CMK, see How // Key State Affects the Use of a Customer Master Key (http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html) // in the AWS Key Management Service Developer Guide. KeyState *string `type:"string" enum:"KeyState"` - // The cryptographic operations for which you can use the key. Currently the - // only allowed value is ENCRYPT_DECRYPT, which means you can use the key for + // The cryptographic operations for which you can use the CMK. Currently the + // only allowed value is ENCRYPT_DECRYPT, which means you can use the CMK for // the Encrypt and Decrypt operations. KeyUsage *string `type:"string" enum:"KeyUsageType"` + + // The source of the CMK's key material. When this value is AWS_KMS, AWS KMS + // created the key material. When this value is EXTERNAL, the key material was + // imported from your existing key management infrastructure or the CMK lacks + // key material. + Origin *string `type:"string" enum:"OriginType"` + + // The time at which the imported key material expires. When the key material + // expires, AWS KMS deletes the key material and the CMK becomes unusable. This + // value is present only for CMKs whose Origin is EXTERNAL and whose ExpirationModel + // is KEY_MATERIAL_EXPIRES, otherwise this value is omitted. + ValidTo *time.Time `type:"timestamp" timestampFormat:"unix"` } // String returns the string representation @@ -3510,7 +3970,8 @@ type PutKeyPolicyInput struct { // The key policy to attach to the CMK. // - // The key policy must meet the following criteria: + // If you do not set BypassPolicyLockoutSafetyCheck to true, the policy must + // meet the following criteria: // // It must allow the principal making the PutKeyPolicy request to make a // subsequent PutKeyPolicy request on the CMK. 
This reduces the likelihood that @@ -4006,6 +4467,15 @@ func (s UpdateKeyDescriptionOutput) GoString() string { return s.String() } +const ( + // @enum AlgorithmSpec + AlgorithmSpecRsaesPkcs1V15 = "RSAES_PKCS1_V1_5" + // @enum AlgorithmSpec + AlgorithmSpecRsaesOaepSha1 = "RSAES_OAEP_SHA_1" + // @enum AlgorithmSpec + AlgorithmSpecRsaesOaepSha256 = "RSAES_OAEP_SHA_256" +) + const ( // @enum DataKeySpec DataKeySpecAes256 = "AES_256" @@ -4013,6 +4483,13 @@ const ( DataKeySpecAes128 = "AES_128" ) +const ( + // @enum ExpirationModelType + ExpirationModelTypeKeyMaterialExpires = "KEY_MATERIAL_EXPIRES" + // @enum ExpirationModelType + ExpirationModelTypeKeyMaterialDoesNotExpire = "KEY_MATERIAL_DOES_NOT_EXPIRE" +) + const ( // @enum GrantOperation GrantOperationDecrypt = "Decrypt" @@ -4041,9 +4518,23 @@ const ( KeyStateDisabled = "Disabled" // @enum KeyState KeyStatePendingDeletion = "PendingDeletion" + // @enum KeyState + KeyStatePendingImport = "PendingImport" ) const ( // @enum KeyUsageType KeyUsageTypeEncryptDecrypt = "ENCRYPT_DECRYPT" ) + +const ( + // @enum OriginType + OriginTypeAwsKms = "AWS_KMS" + // @enum OriginType + OriginTypeExternal = "EXTERNAL" +) + +const ( + // @enum WrappingKeySpec + WrappingKeySpecRsa2048 = "RSA_2048" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go index b73b51bda..c1a643560 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/rds/waiters.go @@ -48,18 +48,6 @@ func (c *RDS) WaitUntilDBInstanceAvailable(input *DescribeDBInstancesInput) erro Argument: "DBInstances[].DBInstanceStatus", Expected: "incompatible-parameters", }, - { - State: "failure", - Matcher: "pathAny", - Argument: "DBInstances[].DBInstanceStatus", - Expected: "incompatible-parameters", - }, - { - State: "failure", - Matcher: "pathAny", - Argument: "DBInstances[].DBInstanceStatus", - Expected: "incompatible-restore", - }, }, } diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go index 517292903..ccbf5cc1a 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -37,6 +37,14 @@ var accelerateOpBlacklist = operationBlacklist{ func updateEndpointForS3Config(r *request.Request) { forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + useDualStack := aws.BoolValue(r.Config.UseDualStack) + + if useDualStack && accelerate { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"), + nil) + return + } if accelerate && accelerateOpBlacklist.Continue(r) { if forceHostStyle { diff --git a/vendor/vendor.json b/vendor/vendor.json index 2aead827c..b5498dbdc 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -285,411 +285,411 @@ { "checksumSHA1": "QhFYdDb2z6DMbZPsDi9oCQS9nRY=", "path": "github.com/aws/aws-sdk-go", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z", + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z", "version": "v1.3.1" }, { - "checksumSHA1": "4e7X+SkJ2EfR4pNJtMlTVwIh90g=", + "checksumSHA1": "k4BkX61fhl/oX9X0lP7GFSvdz1s=", "path": "github.com/aws/aws-sdk-go/aws", - "revision": 
"13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "Y9W+4GimK4Fuxq+vyIskVYFRnX4=", "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=", "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=", "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=", "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=", "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "dNZNaOPfBPnzE2CBnfhXXZ9g9jU=", "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "NUJUTWlc1sV8b7WjfiYc4JZbXl0=", "path": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "4Ipx+5xN0gso+cENC2MHMWmQlR4=", "path": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "dVqXFA18tta86y9KIfBqejJRI8Q=", + "checksumSHA1": "nCMd1XKjgV21bEl7J8VZFqTV8PE=", "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=", "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": 
"f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "NyUg1P8ZS/LHAAQAk/4C5O4X3og=", "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "6rpx6vnvZFvQTIKtxCmhKctnTBU=", + "checksumSHA1": "tBdFneml1Vn7uvezcktsa+hUsGg=", "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "7lla+sckQeF18wORAGuU2fFMlp4=", "path": "github.com/aws/aws-sdk-go/aws/signer/v4", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=", + "checksumSHA1": "Bm6UrYb2QCzpYseLwwgw6aetgRc=", "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=", "path": "github.com/aws/aws-sdk-go/private/protocol", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "uNmSKXAF8B9HWEciW+iyUwZ99qQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "L7xWYwx0jNQnzlYHwBS+1q6DcCI=", "path": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "H9TymcQkQnXSXSVfjggiiS4bpzM=", "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "isoix7lTx4qIq2zI2xFADtti5SI=", "path": "github.com/aws/aws-sdk-go/private/protocol/query", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=", "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=", "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + 
"revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "oUOTWZIpPJiGjc9p/hntdBDvS10=", "path": "github.com/aws/aws-sdk-go/private/protocol/restjson", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "Y6Db2GGfGD9LPpcJIPj8vXE8BbQ=", "path": "github.com/aws/aws-sdk-go/private/protocol/restxml", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "eUEkjyMPAuekKBE4ou+nM9tXEas=", "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "F6mth+G7dXN1GI+nktaGo8Lx8aE=", "path": "github.com/aws/aws-sdk-go/private/signer/v2", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=", "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "j8CUd3jhZ8K+cI8fy785NmqJyzg=", "path": "github.com/aws/aws-sdk-go/service/apigateway", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "qoTWohhN8wMZvdMAbwi+B5YhQJ0=", "path": "github.com/aws/aws-sdk-go/service/applicationautoscaling", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "AUA6op9dlm0X4vv1YPFnIFs6404=", + "checksumSHA1": "ygJl5okbywr9j3Cl2GTI/e8f94c=", "path": "github.com/aws/aws-sdk-go/service/autoscaling", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "vp/AYdsQnZtoPqtX86VsgmLIx1w=", "path": "github.com/aws/aws-sdk-go/service/cloudformation", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "4deSd9La3EF2Cmq+tD5rcvhfTGQ=", + "checksumSHA1": "CpWQcLIxUTtkF6NBRg0QwdeSA/k=", "path": "github.com/aws/aws-sdk-go/service/cloudfront", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "eCFTaV9GKqv/UEzwRgFFUaFz098=", "path": "github.com/aws/aws-sdk-go/service/cloudtrail", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" 
}, { "checksumSHA1": "G9CmCfw00Bjz0TtJsEnxGE6mv/0=", "path": "github.com/aws/aws-sdk-go/service/cloudwatch", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "mWNJKpt18ASs9/RhnIjILcsGlng=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchevents", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "sP/qEaDICVBV3rRw2sl759YI0iw=", "path": "github.com/aws/aws-sdk-go/service/cloudwatchlogs", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "p5a/DcdUvhTx0PCRR+/CRXk9g6c=", "path": "github.com/aws/aws-sdk-go/service/codecommit", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "N8Sgq+xG2vYJdKBikM3yQuIBZfs=", "path": "github.com/aws/aws-sdk-go/service/codedeploy", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "i4hrcsFXLAQXzaxvWh6+BG8XcIU=", "path": "github.com/aws/aws-sdk-go/service/directoryservice", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "y+pZPK8hcTDwq1zHuRduWE14flw=", "path": "github.com/aws/aws-sdk-go/service/dynamodb", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "Ao/Vq8RYiaW63HasBBPkNg/i7CM=", + "checksumSHA1": "lJYBdjCwoqPpmmjNyW2yYk9VGiY=", "path": "github.com/aws/aws-sdk-go/service/ec2", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "IEHq+VLH1fud1oQ4MXj1nqfpgUY=", + "checksumSHA1": "RUfkmRJpf1l6rHJfh/86gtG4Was=", "path": "github.com/aws/aws-sdk-go/service/ecr", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "sHPoLMWXO5tM63ipuxVXduuRypI=", + "checksumSHA1": "n6llxIMIGbjTer/33Zmz4cNKVQA=", "path": "github.com/aws/aws-sdk-go/service/ecs", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "1vOgFGxLhjNe6BK3RJaV1OqisCs=", "path": "github.com/aws/aws-sdk-go/service/efs", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": 
"rjSScNzMTvEHv7Lk5KcxDpNU5EE=", "path": "github.com/aws/aws-sdk-go/service/elasticache", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "RZF1yHtJhAqaMwbeAM/6BdLLavk=", "path": "github.com/aws/aws-sdk-go/service/elasticbeanstalk", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "VAlXnW+WxxWRcCv4xsCoox2kgE0=", "path": "github.com/aws/aws-sdk-go/service/elasticsearchservice", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "qHuJHGUAuuizD9834MP3gVupfdo=", "path": "github.com/aws/aws-sdk-go/service/elastictranscoder", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "1c9xsISLQWKSrORIpdokCCWCe2M=", + "checksumSHA1": "YiNiSOILzSOaKB4JwdM4SDw7daM=", "path": "github.com/aws/aws-sdk-go/service/elb", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "MA6U/Vj0D00yihMHD6bXKyjtfeE=", "path": "github.com/aws/aws-sdk-go/service/emr", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "TtIAgZ+evpkKB5bBYCB69k0wZoU=", "path": "github.com/aws/aws-sdk-go/service/firehose", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "B1EtgBrv//gYqA+Sp6a/SK2zLO4=", "path": "github.com/aws/aws-sdk-go/service/glacier", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "kXJ9ycLAIj0PFSFbfrA/LR/hIi8=", "path": "github.com/aws/aws-sdk-go/service/iam", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "2n5/m0ClE4OyQRNdjfLwg+nSY3o=", "path": "github.com/aws/aws-sdk-go/service/kinesis", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "/cFX1/Gr6M+r9232gLIV+4np7Po=", + "checksumSHA1": "IUiewu7NPRaPPGtWkXHkvVU+80c=", "path": "github.com/aws/aws-sdk-go/service/kms", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "Qpi347xz5FIQISq73dZSdIf47AU=", "path": "github.com/aws/aws-sdk-go/service/lambda", - 
"revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "aLwDFgrPzIBidURxso1ujcr2pDs=", "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "9JvmBN9zOBFAIMhBUNU81ZTdFQA=", + "checksumSHA1": "abtQbJdjxwPxvt4p/X0My6FtfZI=", "path": "github.com/aws/aws-sdk-go/service/rds", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "mgImZ/bluUOY9GpQ/oAnscIXwrA=", "path": "github.com/aws/aws-sdk-go/service/redshift", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "y6jKUvrpTJxj5uh6OqQ4FujhCHU=", "path": "github.com/aws/aws-sdk-go/service/route53", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { - "checksumSHA1": "+608jtc5uRpGqGu5ntpKhfWgwGc=", + "checksumSHA1": "imxJucuPrgaPRMPtAgsu+Y7soB4=", "path": "github.com/aws/aws-sdk-go/service/s3", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "o+bjuT6ycywUf+vXY9hYK4Z3okE=", "path": "github.com/aws/aws-sdk-go/service/ses", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "DW5kDRWLA2yAgYh9vsI+0uVqq/Q=", "path": "github.com/aws/aws-sdk-go/service/simpledb", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "+ic7vevBfganFLENR29pJaEf4Tw=", "path": "github.com/aws/aws-sdk-go/service/sns", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "oLAlquYlQzgYFS9ochS/iQ9+uXY=", "path": "github.com/aws/aws-sdk-go/service/sqs", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "checksumSHA1": "nH/itbdeFHpl4ysegdtgww9bFSA=", "path": "github.com/aws/aws-sdk-go/service/sts", - "revision": "13d7266e75bd078dd0e904106be68b2e52860fd7", - "revisionTime": "2016-08-06T21:45:34Z" + "revision": "f80e7d0182a463dff0c0da6bbed57f21369d4346", + "revisionTime": "2016-08-11T16:24:59Z" }, { "path": "github.com/bgentry/speakeasy", From 3671bae61025566071c0be2ba15725fd52e3265e Mon Sep 17 00:00:00 2001 From: Ayu Demura Date: Thu, 11 Aug 2016 13:20:15 -0400 Subject: [PATCH 0675/1238] Update packngo once again --- 
command/internal_plugin_list.go | 2 +- vendor/github.com/packethost/packngo/volumes.go | 4 ++-- vendor/vendor.json | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/command/internal_plugin_list.go b/command/internal_plugin_list.go index e4807b799..e3fc743c3 100644 --- a/command/internal_plugin_list.go +++ b/command/internal_plugin_list.go @@ -61,6 +61,7 @@ import ( ) var InternalProviders = map[string]plugin.ProviderFunc{ + "archive": archiveprovider.Provider, "atlas": atlasprovider.Provider, "aws": awsprovider.Provider, "azure": azureprovider.Provider, @@ -105,7 +106,6 @@ var InternalProviders = map[string]plugin.ProviderFunc{ "ultradns": ultradnsprovider.Provider, "vcd": vcdprovider.Provider, "vsphere": vsphereprovider.Provider, - "archive": archiveprovider.Provider, } var InternalProvisioners = map[string]plugin.ProvisionerFunc{ diff --git a/vendor/github.com/packethost/packngo/volumes.go b/vendor/github.com/packethost/packngo/volumes.go index c3c3e561b..96ab3b6e8 100644 --- a/vendor/github.com/packethost/packngo/volumes.go +++ b/vendor/github.com/packethost/packngo/volumes.go @@ -56,7 +56,7 @@ type VolumeCreateRequest struct { ProjectID string `json:"project_id"` PlanID string `json:"plan_id"` FacilityID string `json:"facility_id"` - Description string `json:"Description,omitempty"` + Description string `json:"description,omitempty"` SnapshotPolicies []*SnapshotPolicy `json:"snapshot_policies,omitempty"` } @@ -82,7 +82,7 @@ type VolumeServiceOp struct { // Get returns a volume by id func (v *VolumeServiceOp) Get(volumeID string) (*Volume, *Response, error) { - path := fmt.Sprintf("%s/%s?include=facility", volumeBasePath, volumeID) + path := fmt.Sprintf("%s/%s?include=facility,snapshot_policies,attachments.device", volumeBasePath, volumeID) req, err := v.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err diff --git a/vendor/vendor.json b/vendor/vendor.json index 319059693..d8c7daa5f 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1464,10 +1464,10 @@ "revision": "3d184cea22ee1c41ec1697e0d830ff0c78f7ea97" }, { - "checksumSHA1": "bpR9eoBfEDcXrm5Q0By4cDDlXo8=", + "checksumSHA1": "MexE5QPVAwVfQcJBnMGMgD+s9L0=", "path": "github.com/packethost/packngo", - "revision": "5c74c3276242fb359a06b643437a9d8903eb76e6", - "revisionTime": "2016-07-26T11:06:53Z" + "revision": "7cd5fed006859e86dd5641a6cf9812e855b7574a", + "revisionTime": "2016-08-11T16:27:25Z" }, { "path": "github.com/pborman/uuid", From 45c5675c8e57016eb3e7b7836db7483b282dca81 Mon Sep 17 00:00:00 2001 From: Linda Xu Date: Thu, 11 Aug 2016 14:51:25 -0700 Subject: [PATCH 0676/1238] add Aurora instance failover priority feature (#8087) * add Aurora instance failover priority feature * promotion_tier move to input directly * fix format issue --- .../aws/resource_aws_rds_cluster_instance.go | 14 ++++++++++++++ .../aws/resource_aws_rds_cluster_instance_test.go | 1 + .../aws/r/rds_cluster_instance.html.markdown | 1 + 3 files changed, 16 insertions(+) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance.go b/builtin/providers/aws/resource_aws_rds_cluster_instance.go index 72914b14d..4cc403a3f 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance.go @@ -109,6 +109,12 @@ func resourceAwsRDSClusterInstance() *schema.Resource { Default: 0, }, + "promotion_tier": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 0, + }, + "tags": tagsSchema(), }, } @@ -123,6 +129,7 @@ func 
resourceAwsRDSClusterInstanceCreate(d *schema.ResourceData, meta interface{ DBClusterIdentifier: aws.String(d.Get("cluster_identifier").(string)), Engine: aws.String("aurora"), PubliclyAccessible: aws.Bool(d.Get("publicly_accessible").(bool)), + PromotionTier: aws.Int64(int64(d.Get("promotion_tier").(int))), Tags: tags, } @@ -226,6 +233,7 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{}) d.Set("instance_class", db.DBInstanceClass) d.Set("identifier", db.DBInstanceIdentifier) d.Set("storage_encrypted", db.StorageEncrypted) + d.Set("promotion_tier", db.PromotionTier) if db.MonitoringInterval != nil { d.Set("monitoring_interval", db.MonitoringInterval) @@ -285,6 +293,12 @@ func resourceAwsRDSClusterInstanceUpdate(d *schema.ResourceData, meta interface{ requestUpdate = true } + if d.HasChange("promotion_tier") { + d.SetPartial("promotion_tier") + req.PromotionTier = aws.Int64(int64(d.Get("promotion_tier").(int))) + requestUpdate = true + } + log.Printf("[DEBUG] Send DB Instance Modification request: %#v", requestUpdate) if requestUpdate { log.Printf("[DEBUG] DB Instance Modification request: %#v", req) diff --git a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go index 12663a702..43d13739f 100644 --- a/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go +++ b/builtin/providers/aws/resource_aws_rds_cluster_instance_test.go @@ -222,6 +222,7 @@ resource "aws_rds_cluster_instance" "cluster_instances" { cluster_identifier = "${aws_rds_cluster.default.id}" instance_class = "db.r3.large" db_parameter_group_name = "${aws_db_parameter_group.bar.name}" + promotion_tier = "3" } resource "aws_db_parameter_group" "bar" { diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown index 1a3dba1fd..c599bb48b 100644 --- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown +++ b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown @@ -70,6 +70,7 @@ details on controlling this property. enhanced monitoring metrics to CloudWatch Logs. You can find more information on the [AWS Documentation](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html) what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances. * `monitoring_interval` - (Optional) The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60. +* `promotion_tier` - (Optional) Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoter to writer * `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `storage_encrypted` needs to be set to true * `tags` - (Optional) A mapping of tags to assign to the instance. 
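For illustration, a minimal configuration sketch of the `promotion_tier` argument documented above. The resource names, identifiers, and the referenced `aws_rds_cluster.default` are illustrative assumptions, not part of the patch; on failover, Aurora promotes the reader with the lowest tier value (valid range 0-15) to writer first.

    # Illustrative sketch only: two instances in one Aurora cluster with
    # different failover priorities; tier 0 is promoted before tier 15.
    resource "aws_rds_cluster_instance" "writer_candidate" {
      identifier         = "aurora-instance-0"
      cluster_identifier = "${aws_rds_cluster.default.id}"
      instance_class     = "db.r3.large"
      promotion_tier     = 0
    }

    resource "aws_rds_cluster_instance" "reader" {
      identifier         = "aurora-instance-1"
      cluster_identifier = "${aws_rds_cluster.default.id}"
      instance_class     = "db.r3.large"
      promotion_tier     = 15
    }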
From 691bcda02608a3da9692d573081f8531bbaac3f6 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 12 Aug 2016 07:52:37 +1000 Subject: [PATCH 0677/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b55d47359..50538efbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,7 @@ IMPROVEMENTS * provider/aws: Add ability to set canned ACL in `aws_s3_bucket_object` [GH-8091] * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check [GH-7874] * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated [GH-7794] + * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` [GH-8087] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From 01fb6c79a7e585c5943250f47f5f6b8ed5d19375 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 11 Aug 2016 18:03:35 -0400 Subject: [PATCH 0678/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 50538efbd..5123615a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,6 +30,7 @@ IMPROVEMENTS * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] * remote/consul: Support setting datacenter when using consul remote state [GH-8102] + * provider/google: Support Import of `google_compute_target_pool` [GH-8133] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 6523ebd9614522589cfc394888d8e23e9f11dbfd Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 11 Aug 2016 18:04:25 -0400 Subject: [PATCH 0679/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5123615a4..f47c39ea5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ IMPROVEMENTS * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] * remote/consul: Support setting datacenter when using consul remote state [GH-8102] * provider/google: Support Import of `google_compute_target_pool` [GH-8133] + * provider/google: Support Import of 'google_compute_forwarding_rule' [GH-8122] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 82e2bd504b24c5c43a9174ee95c820b3f1c6cd9b Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 11 Aug 2016 18:06:07 -0400 Subject: [PATCH 0680/1238] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f47c39ea5..8faafaa5a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,8 @@ IMPROVEMENTS * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] * remote/consul: Support setting datacenter when using consul remote state [GH-8102] * provider/google: Support Import of `google_compute_target_pool` [GH-8133] - * provider/google: Support Import of 'google_compute_forwarding_rule' [GH-8122] + * provider/google: Support Import of `google_compute_forwarding_rule` [GH-8122] + * 
provider/google: Support Import of `google_resource_http_health_check` [GH-8121] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From 45219dbb4f5982f0aa1575c2d2d61316617e51e7 Mon Sep 17 00:00:00 2001 From: Lars Wander Date: Thu, 11 Aug 2016 18:07:59 -0400 Subject: [PATCH 0681/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8faafaa5a..b71220bbd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -33,6 +33,7 @@ IMPROVEMENTS * provider/google: Support Import of `google_compute_target_pool` [GH-8133] * provider/google: Support Import of `google_compute_forwarding_rule` [GH-8122] * provider/google: Support Import of `google_resource_http_health_check` [GH-8121] + * provider/google: Support Import of `google_compute_autoscaler` [GH-8115] BUG FIXES: * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] From c072c0dfbb0a002eca9292e693811dd7cd0b0e2a Mon Sep 17 00:00:00 2001 From: Max Englander Date: Thu, 11 Aug 2016 22:22:41 -0400 Subject: [PATCH 0682/1238] #7013 add tls config support to consul provider (#7015) * #7013 add tls config support to consul provider * #7013 add acceptance tests * #7013 use GFM tables * #7013 require one of {CONSUL_ADDRESS,CONSUL_HTTP_ADDR} when running consul acc tests --- builtin/providers/consul/config.go | 18 +- builtin/providers/consul/resource_provider.go | 27 ++- .../consul/resource_provider_test.go | 40 +++-- .../providers/consul/test-fixtures/README.md | 41 +++++ .../consul/test-fixtures/agent.json.example | 11 ++ .../consul/test-fixtures/agentcert.pem | 27 +++ .../consul/test-fixtures/agentkey.pem | 27 +++ .../providers/consul/test-fixtures/cacert.pem | 22 +++ .../consul/test-fixtures/usercert.pem | 25 +++ .../consul/test-fixtures/userkey.pem | 27 +++ .../github.com/hashicorp/consul/api/agent.go | 107 +++++++++--- vendor/github.com/hashicorp/consul/api/api.go | 113 ++++++++++++- .../hashicorp/consul/api/catalog.go | 15 +- .../github.com/hashicorp/consul/api/health.go | 22 ++- vendor/github.com/hashicorp/consul/api/kv.go | 156 ++++++++++++++++++ vendor/vendor.json | 4 +- .../docs/providers/consul/index.html.markdown | 4 + 17 files changed, 633 insertions(+), 53 deletions(-) create mode 100644 builtin/providers/consul/test-fixtures/README.md create mode 100644 builtin/providers/consul/test-fixtures/agent.json.example create mode 100644 builtin/providers/consul/test-fixtures/agentcert.pem create mode 100644 builtin/providers/consul/test-fixtures/agentkey.pem create mode 100644 builtin/providers/consul/test-fixtures/cacert.pem create mode 100644 builtin/providers/consul/test-fixtures/usercert.pem create mode 100644 builtin/providers/consul/test-fixtures/userkey.pem diff --git a/builtin/providers/consul/config.go b/builtin/providers/consul/config.go index cb6d7af79..c048b5dda 100644 --- a/builtin/providers/consul/config.go +++ b/builtin/providers/consul/config.go @@ -2,6 +2,7 @@ package consul import ( "log" + "net/http" consulapi "github.com/hashicorp/consul/api" ) @@ -9,8 +10,11 @@ import ( type Config struct { Datacenter string `mapstructure:"datacenter"` Address string `mapstructure:"address"` - Token string `mapstructure:"token"` Scheme string `mapstructure:"scheme"` + Token string `mapstructure:"token"` + CAFile string `mapstructure:"ca_file"` + CertFile string `mapstructure:"cert_file"` + KeyFile string `mapstructure:"key_file"` } // Client() returns a new client for accessing consul. 
@@ -26,9 +30,21 @@ func (c *Config) Client() (*consulapi.Client, error) { if c.Scheme != "" { config.Scheme = c.Scheme } + + tlsConfig := &consulapi.TLSConfig{} + tlsConfig.CAFile = c.CAFile + tlsConfig.CertFile = c.CertFile + tlsConfig.KeyFile = c.KeyFile + cc, err := consulapi.SetupTLSConfig(tlsConfig) + if err != nil { + return nil, err + } + config.HttpClient.Transport.(*http.Transport).TLSClientConfig = cc + if c.Token != "" { config.Token = c.Token } + client, err := consulapi.NewClient(config) log.Printf("[INFO] Consul Client configured with address: '%s', scheme: '%s', datacenter: '%s'", diff --git a/builtin/providers/consul/resource_provider.go b/builtin/providers/consul/resource_provider.go index 9b15ecdab..c6e3c5b8a 100644 --- a/builtin/providers/consul/resource_provider.go +++ b/builtin/providers/consul/resource_provider.go @@ -20,11 +20,34 @@ func Provider() terraform.ResourceProvider { "address": &schema.Schema{ Type: schema.TypeString, Optional: true, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "CONSUL_ADDRESS", + "CONSUL_HTTP_ADDR", + }, nil), }, "scheme": &schema.Schema{ - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("CONSUL_SCHEME", nil), + }, + + "ca_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CA_FILE", nil), + }, + + "cert_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("CONSUL_CERT_FILE", nil), + }, + + "key_file": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("CONSUL_KEY_FILE", nil), }, "token": &schema.Schema{ diff --git a/builtin/providers/consul/resource_provider_test.go b/builtin/providers/consul/resource_provider_test.go index eb7f73ba0..df8fe1b85 100644 --- a/builtin/providers/consul/resource_provider_test.go +++ b/builtin/providers/consul/resource_provider_test.go @@ -4,7 +4,6 @@ import ( "os" "testing" - consulapi "github.com/hashicorp/consul/api" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/terraform" @@ -18,12 +17,6 @@ func init() { testAccProviders = map[string]terraform.ResourceProvider{ "consul": testAccProvider, } - - // Use the demo address for the acceptance tests - testAccProvider.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) { - conf := consulapi.DefaultConfig() - return consulapi.NewClient(conf) - } } func TestResourceProvider(t *testing.T) { @@ -56,8 +49,35 @@ func TestResourceProvider_Configure(t *testing.T) { } } -func testAccPreCheck(t *testing.T) { - if v := os.Getenv("CONSUL_HTTP_ADDR"); v == "" { - t.Fatal("CONSUL_HTTP_ADDR must be set for acceptance tests") +func TestResourceProvider_ConfigureTLS(t *testing.T) { + rp := Provider() + + raw := map[string]interface{}{ + "address": "demo.consul.io:80", + "ca_file": "test-fixtures/cacert.pem", + "cert_file": "test-fixtures/usercert.pem", + "datacenter": "nyc3", + "key_file": "test-fixtures/userkey.pem", + "scheme": "https", + } + + rawConfig, err := config.NewRawConfig(raw) + if err != nil { + t.Fatalf("err: %s", err) + } + + err = rp.Configure(terraform.NewResourceConfig(rawConfig)) + if err != nil { + t.Fatalf("err: %s", err) } } + +func testAccPreCheck(t *testing.T) { + if v := os.Getenv("CONSUL_HTTP_ADDR"); v != "" { + return + } + if v := os.Getenv("CONSUL_ADDRESS"); v != "" { + return + } + t.Fatal("Either CONSUL_ADDRESS or 
CONSUL_HTTP_ADDR must be set for acceptance tests") +} diff --git a/builtin/providers/consul/test-fixtures/README.md b/builtin/providers/consul/test-fixtures/README.md new file mode 100644 index 000000000..91cdc1248 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/README.md @@ -0,0 +1,41 @@ +# Running Consul for Terraform Acceptance Tests + +## TLS + +Some of the acceptance tests for the `consul` provider use +TLS. To service these tests, a Consul server must be started +with HTTPS enabled with TLS certificates. + +### Test fixtures + +File | Description +--- | --- +`agent.json.example` | Configures the Consul agent to respond to HTTPS requests, and verifies the authenticity of HTTPS requests +`agentcert.pem` | A PEM-encoded certificate used by the Consul agent, valid only for 127.0.0.1 signed by `cacert.pem`, expires 2026 +`agentkey.pem` | A PEM-encoded private key used by the Consul agent +`cacert.pem` | A PEM-encoded Certificate Authority, expires 2036 +`usercert.pem` | A PEM-encoded certificate used by the Terraform acceptance tests, signed by `cacert.pem`, expires 2026 +`userkey.pem` | A PEM-encoded private key used by the Terraform acceptance tests + +### Start + +Start a Consul server configured to serve HTTP traffic, and validate incoming +HTTPS requests. + + ~/.go/src/github.com/hashicorp/terraform> consul agent \ + -bind 127.0.0.1 \ + -data-dir=/tmp \ + -dev \ + -config-file=builtin/providers/consul/text-fixtures/agent.json.example \ + -server + +### Test + +With TLS, `CONSUL_HTTP_ADDR` must match the Common Name of the agent certificate. + + ~/.go/src/github.com/hashicorp/terraform> CONSUL_CERT_FILE=test-fixtures/usercert.pem \ + CONSUL_KEY_FILE=test-fixtures/userkey.pem \ + CONSUL_CA_FILE=test-fixtures/cacert.pem \ + CONSUL_SCHEME=https \ + CONSUL_HTTP_ADDR=127.0.0.1:8943 \ + make testacc TEST=./builtin/providers/consul/ diff --git a/builtin/providers/consul/test-fixtures/agent.json.example b/builtin/providers/consul/test-fixtures/agent.json.example new file mode 100644 index 000000000..aefe437a7 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/agent.json.example @@ -0,0 +1,11 @@ +{ + "ca_file": "./cacert.pem", + "cert_file": "./agentcert.pem", + "datacenter": "dc1", + "domain": "hashicorp.test", + "key_file": "./agentkey.pem", + "ports": { + "https": 8943 + }, + "verify_incoming": true +} diff --git a/builtin/providers/consul/test-fixtures/agentcert.pem b/builtin/providers/consul/test-fixtures/agentcert.pem new file mode 100644 index 000000000..b1b904e95 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/agentcert.pem @@ -0,0 +1,27 @@ +-----BEGIN CERTIFICATE----- +MIIEjjCCA3agAwIBAgICEAMwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx +EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE +CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 +MTEwNTEwMDRaGA8yMDY2MDczMDA1MTAwNFowXjELMAkGA1UEBhMCVVMxEzARBgNV +BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz +aGlDb3JwMRIwEAYDVQQDDAkxMjcuMC4wLjEwggEiMA0GCSqGSIb3DQEBAQUAA4IB +DwAwggEKAoIBAQDQiR2zwLdfCd95LIrKekZlDKo3YF5nMZFzR3OR1Mc8jCAaYLz/ +ZFr8hVSAsygwZ+tHzoHP0U3FxeYemPtjLAPE077C6h+v6pHiTLxOkd22GtyalgMZ +E4ACGSogqDUvwssxxDUsG2ItzhVCB0GXTlfo/6XhApyRqvnEto+ZJ+zk6MiHnAmc +eN9sx0c5K097+Nq7PZgtk6HOxbKSvMWEkTtHrOBrhc9lTfwVSiWHdZ2X0wpOL1Ra +pFnBMDxnWyH2ivVMknarzKz2pBBDJwTGvsJcC1ymprqU+SRyjs75BfNv2BKJrhb4 +vBj3YEGMBEhHKtnObniGqV8W4o9jBIwocFpfAgMBAAGjggFKMIIBRjAPBgNVHREE +CDAGhwR/AAABMAkGA1UdEwQCMAAwEQYJYIZIAYb4QgEBBAQDAgZAMDMGCWCGSAGG 
++EIBDQQmFiRPcGVuU1NMIEdlbmVyYXRlZCBTZXJ2ZXIgQ2VydGlmaWNhdGUwHQYD +VR0OBBYEFNzoTM6XceaITc2lVIDrXaYBKJolMIGRBgNVHSMEgYkwgYaAFANEnP7/ +5Iil24eKuYJTt/IJPfamoWqkaDBmMQswCQYDVQQGEwJVUzETMBEGA1UECAwKQ2Fs +aWZvcm5pYTESMBAGA1UECgwJSGFzaGlDb3JwMRIwEAYDVQQLDAlIYXNoaUNvcnAx +GjAYBgNVBAMMEUhhc2hpQ29ycCBUZXN0IENBggIQADAOBgNVHQ8BAf8EBAMCBaAw +HQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IB +AQB4p5sWE+p+sheXYgkg/PsATRMxYDPRTw0Zvd2AKOrodqF4IlibSb4mVVh0fPtx +2VX3z/WOZb8wgXNnEUhVcijf7LgGvw/SvQGgW5mXYSCcHeb4ETFJ1yuKZj5yn5tl +vZx1Sq/fGFkjHn3mgL+gzyQlNk1Wt0p3fLsIfpMOgpntSdRq3IUvf+W+oH5BUrTl +WgaXUD3lkdx3R9h3uLX4nxJWpMPViPCpr3ADW9oEwoHHQbe3LC7iJI2Us/qIH73n +Du7mUk+/HSkajjFsxnVoFCF1+RMqf1i9w7tXaAwWYT+vaP46fq3M/Bmsv/gDc5ur +8p48hpQ61Sfj0oU38Ftzzcs+ +-----END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/agentkey.pem b/builtin/providers/consul/test-fixtures/agentkey.pem new file mode 100644 index 000000000..61e19a191 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/agentkey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEA0Ikds8C3XwnfeSyKynpGZQyqN2BeZzGRc0dzkdTHPIwgGmC8 +/2Ra/IVUgLMoMGfrR86Bz9FNxcXmHpj7YywDxNO+wuofr+qR4ky8TpHdthrcmpYD +GROAAhkqIKg1L8LLMcQ1LBtiLc4VQgdBl05X6P+l4QKckar5xLaPmSfs5OjIh5wJ +nHjfbMdHOStPe/jauz2YLZOhzsWykrzFhJE7R6zga4XPZU38FUolh3Wdl9MKTi9U +WqRZwTA8Z1sh9or1TJJ2q8ys9qQQQycExr7CXAtcpqa6lPkkco7O+QXzb9gSia4W ++LwY92BBjARIRyrZzm54hqlfFuKPYwSMKHBaXwIDAQABAoIBAFQxyAg3GtIITm3C +ChdN3vYVcvQAuJy5apw8kPCkE/ziJmQAAs6qWgHyYvfDXcqNanUHb2nUe64KBKr9 +4SFdN/hT9YUEud5wuo2/pZejVPydQ8w2HPIW6WvvdQ7SWwb5gsiJC17Pf4g22GZc +P6MzQlMURIjgYQ5/FXDStI+FiyOwDhHDbLoMaIOfToWJupd+sGREccSKOVmJdGY/ +7/n1AGvfbgUToy2sMEz7HqTtgRJW/Knko2dD3ZWh7KqFS2GUYsJ3Ake1CG7xT6sA +4MWQvfR/+t7xSpDDU2WlNgFi9sQ8UjrIhaMYaiFhV6h2bTVeGl1tvBmbE77Z1Lne +jcobwKECgYEA6SuDF0wuu8WnDXnCpNrd83gONy1pEp9s7vbf/GrMXGaavQmqb6x1 +sLZTXho1srqRLXGSAvbDS/yO6wRUd/CdLB8WBda3lcH9y/II1BDynEygGpipGa83 +7Ti+D2hMSMLhX1vsUcCwz3fz61wzBqvdvrjdymmivPLu3rMINd8twl0CgYEA5PQi +jwi483icPPOc1S/5CsKnj6nJwKVQz9dDeT6QLVDCoVh5u0X9WZAAnMdrR9yZS6ax +ZAF453DPlK6Ct7vcBPEFC1W6QrRhjJrzvRb/PLnzaRMY+YoEg2qmGreb+30jrx+4 +jkTLkz4Qag+jOdR3t4104Pix1CkpQUX0chD5u+sCgYAiuI8Bxh9jaLBSimIYqFrK +qYL8Zm+yDTlscCi0brbVv5WlNq5BiN3RnaTWa3K5lZyOts22UUaNpyMlDfUCEztk +WZCu9+VIkKWZXAZChe+KpMJmk3sCzxu14HA03SQW5aYnzAlptxbdHhCdaJJUmP0h +LGgifw5zsn0tfl1noD8xJQKBgBKSSwtXJcl6CxJWoG4aihT5XSYmG5tozXlOeMao +8ID8gA0eZCFwt/A/4gzVkDowBq9AQjteczQyzmO9FBVbQ6mS81nMBmPKxe7l0seP +yfxfCQOI7QmwzFTsnbSlGB36NJ7L7+h6ZBj5e9NemVrjhSJ6cvSct7AB9rq4te9a +uScpAoGBAOIjcv2lQsJ3GWBTHWCh23jC/0XPE4bJg9DjliHQDAB/Yp49oV1giWs6 +xI0SBsovtJqJxOd6F8e6HuQTt1X1kQ8Q1Itb78Wx9Rs4bvN51pxj4L+DTxLBMl5g +xXsS+Zfm5O2HGxU5t60CsxRyF0EVNVEtgKkIiQE+ZaQ1d0WJC8RI +-----END RSA PRIVATE KEY----- diff --git a/builtin/providers/consul/test-fixtures/cacert.pem b/builtin/providers/consul/test-fixtures/cacert.pem new file mode 100644 index 000000000..21aca8283 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/cacert.pem @@ -0,0 +1,22 @@ +-----BEGIN CERTIFICATE----- +MIIDrTCCApWgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx +EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE +CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 +MTEwNTA1MDZaGA8yMTE2MDcxODA1MDUwNlowZjELMAkGA1UEBhMCVVMxEzARBgNV +BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz +aGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKglYmf9Tv1u6e1ulQZNpmvUHi+D+/PBHyg9Ft60 +HiZaeBGyNPZX9uVuM1jN3o/qpwBQxhq3ojQafU3WDSU6Y0GZ1e8AcLsObeNUjma4 
+eLjZy+059Vt7DKcp6LA+7JqELToK83QzqNdYuocze5v9hPt5W6Q3Dp5rsmVjOFim +6LxcN/TAcmW+ZrykOGOT4QyYFkamp4uMJkpX1UwO3djdQF7CllnOboUUYqGyOt9e +BBudhCsSvWpJa/wNcAH2AxzaIVu85Dmg3G0Erekcget5ewebsnhGs3emfWO/XQht +uwKdz60mz1vAIK3UR5eYCbxnLrXM0WfcYKFqhuQpqqONWtUCAwEAAaNjMGEwHQYD +VR0OBBYEFANEnP7/5Iil24eKuYJTt/IJPfamMB8GA1UdIwQYMBaAFANEnP7/5Iil +24eKuYJTt/IJPfamMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0G +CSqGSIb3DQEBCwUAA4IBAQBzzvX4GiBalxuW5YxMiuFAljNB+tW20Frz0s7bq0+Z +1+ErQIW26NUHH14RUU4vbisX09QMm4p62oJOpo/5nW1VqsyoTCQJXaolGF6UidFy +l/2bgOy8QbCOqrS0jt0MFQFDr9Z/m8dBgbjFzv8gfsnpxDQvi+iKkVSuzlIfcvoo +xlwtNnrD9lSsinP4Zo8sNqjhaRbih8zhsUdd0mUDDGczw2mY2CdMmeH0wflJMEVe +3hwR8650sCJlJfVuFUDsqy1K9T5j5NVv7i6RloeMvYOH2nwpIejE88lmjpXR6Bzw +g8geEjKOLBN8Nmak3jSvH2IewczZKSaKNSiv/4Izut/8 +-----END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/usercert.pem b/builtin/providers/consul/test-fixtures/usercert.pem new file mode 100644 index 000000000..0580e364f --- /dev/null +++ b/builtin/providers/consul/test-fixtures/usercert.pem @@ -0,0 +1,25 @@ +-----BEGIN CERTIFICATE----- +MIIEJjCCAw6gAwIBAgICEAIwDQYJKoZIhvcNAQELBQAwZjELMAkGA1UEBhMCVVMx +EzARBgNVBAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UE +CwwJSGFzaGlDb3JwMRowGAYDVQQDDBFIYXNoaUNvcnAgVGVzdCBDQTAgFw0xNjA4 +MTEwNTA1MzFaGA8yMDY2MDczMDA1MDUzMVowcjELMAkGA1UEBhMCVVMxEzARBgNV +BAgMCkNhbGlmb3JuaWExEjAQBgNVBAoMCUhhc2hpQ29ycDESMBAGA1UECwwJSGFz +aGlDb3JwMSYwJAYDVQQDDB1IYXNoaUNvcnAgVGVzdCBUZXJyYWZvcm0gVXNlcjCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANrfzsj+gIM3pvuI+hdx2W5s +hTh2YXd4Q7cByLDzXPRgI0W1BFOIxOAdHy/zqxCKQPxiibxPqDCxzPnc7mSco8e0 +zvihAysthiUmWcNdF1pIh6631SU9rE+Mis6XcW2beuh/IVloXBwI4dmSuX3Urb0D +Aw3Rb5kCJzXUTBG/g8KriR6KyNFTu0Wb/1NcrrCnNAteQmpDuuMtx75stfoMUnlr +xZfsCZXHVpe8GmVlwqr8Mw7NKmyeKgl0rH1Mef6+ce9BPnVBxdJMEYWl+UQfTSV+ +pWoNtQTZxEbhbMFhYi410EJ5s0Nw6lyUnXrQ2/YglikIvnyfWj/CwLTZwaXlgAkC +AwEAAaOBzzCBzDAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIFoDAzBglghkgB +hvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmljYXRlMB0G +A1UdDgQWBBRIgDbi1wLtW+u3PgrbVrNDkGfwGjAfBgNVHSMEGDAWgBQDRJz+/+SI +pduHirmCU7fyCT32pjAOBgNVHQ8BAf8EBAMCBeAwJwYDVR0lBCAwHgYIKwYBBQUH +AwIGCCsGAQUFBwMEBggrBgEFBQcDATANBgkqhkiG9w0BAQsFAAOCAQEAi1pDjqy1 +bN9cLknPyblWdgaO0xSXJAvBpEaFnz88wmEPOmsg1889x7jJKhrGjTpjDJMeq3kh +ziDVCpOJesJOlUsa8ejOhMbcdiqHOWSk12bQC7dBbnAAXwO1Tr583IdLhC+ej64r +J4dBk7/wLBx2Deh8wxW+6TrIFNCyVptcHw76K2AkO11fscqS0sL/WVxqi1mjA9rV +KNGDIEIzqu13jZ3t0Cxc5AZ6dGHBALGNkfhjJ9SCpuPf8CmWNYHGWeIV0N4AB2SQ +gAjRYUKY4+zU3e+lUuudgTYZIM+zark6hLUAfXTeNRk6kGHod7x/Q9NvLB4SLwlI +DAzXJ9QHZyO1vQ== +-----END CERTIFICATE----- diff --git a/builtin/providers/consul/test-fixtures/userkey.pem b/builtin/providers/consul/test-fixtures/userkey.pem new file mode 100644 index 000000000..e3171bc28 --- /dev/null +++ b/builtin/providers/consul/test-fixtures/userkey.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpgIBAAKCAQEA2t/OyP6Agzem+4j6F3HZbmyFOHZhd3hDtwHIsPNc9GAjRbUE +U4jE4B0fL/OrEIpA/GKJvE+oMLHM+dzuZJyjx7TO+KEDKy2GJSZZw10XWkiHrrfV +JT2sT4yKzpdxbZt66H8hWWhcHAjh2ZK5fdStvQMDDdFvmQInNdRMEb+DwquJHorI +0VO7RZv/U1yusKc0C15CakO64y3Hvmy1+gxSeWvFl+wJlcdWl7waZWXCqvwzDs0q +bJ4qCXSsfUx5/r5x70E+dUHF0kwRhaX5RB9NJX6lag21BNnERuFswWFiLjXQQnmz +Q3DqXJSdetDb9iCWKQi+fJ9aP8LAtNnBpeWACQIDAQABAoIBAQCRXS8zIoQrodyP +FkwzIfPseLqJ42WcOQ2QD+lATIEh9G+4rh5vdFh9GBpMeKLWW1wJw1AC90yW+p9O +G0NhIv9LdXQ4gIdgN93t8miPbdZCqgUjLwiqsSkttAPEbaRxzV915mk5vivemq+V +FvOG9Kdm7wcqODzL/DgaciMLboyNzrChltgybGKQIHAd9UFm+jE86IeeBsVcHuoL +0rEsYFKKzgdmIizjDOWPDSzVKL+dkiZ/8rYgoe1VtGV1DRWAWU5fawDFrdOxsGCh 
+Ob+rEmosTvONEQhB6GsdOQZ8++N6UTiJw32jqgieeP+Xj+K4XNG3nhP000DUIx/o +pRnj+KDhAoGBAPWXFbGHIMwEJCUV6OUXiY9Enb3A/Jf65f7cKbvo1i/nIbhzEv3v +LBtcUrsTmgTgNvuh3tF1RnwAUeYgedjdnALGJL16h3E0IWLAStaovoKgQyHHrtp9 +CEnOIj3PcPTFJVe+2FeV0+/kLjTHsZj9gMljzfxgswNdYfeGjXp4o1lVAoGBAOQm +06TW3smWW0FIRyyDNBwhQjD8tg3Yn0QJ+zFP6WMk5qbu/LeJnJSevBpQt1PvJ6xQ +kCj6Bi90jtLeuCW/8XLQjP46jQLU+3a74m3Nszgu9JVofiK6EPIsx62TGlwtIJfJ +U4+C5D/Piw/3qy6MjDbA1NJlSE4i2hAgGA79cDvlAoGBAN2o2sSbkOdyyWjLiKPV +BaxQowrUN2e45YONFQHsGf2sYEwJWNfm2elr/6OoAnhqIlYleGWWsuJSq5jIMRGi +myAJ1LlL8Rkkkwl9Q07RiPl/SngfsVq0RRnQOimNpIbXtWen8b3DlkFLssSihFHw +ZB/gu9cRNCFSVIzDXchvQAftAoGBAL0EzeOLgRhSUVhMoWrnaIzFoSkktU/TYF/m +RQ4dvqY9NDqpVQZaJDedKwpCRSBsytmgBU9tlSJL1ugtTTM5Srhsv+MAb0MhYRSF +pJqECS9K96ew4o+yx8dcAjJz5Sro2E/opCoJr0COmg+oiVIPbzsNl0SYVMcnaLJj +ZItGvW1hAoGBALeVUiqLgEDNQAIvprRlpJhU/ANpKm01ja9v66cgvmg62P9fcqb+ +yYPuJ2CwFDlb70KIDys6q9sTKUFykPDiKQgAFvEBQgAyOb3kdl4SXoUPbVDhMqwB +OfPznsXM6Y5LFNLzEi4n0QP4KsLc+wM52On5vnj7Mgvt5h2QlllPPTXy +-----END RSA PRIVATE KEY----- diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index e4466a651..3df013cc5 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -18,11 +18,12 @@ type AgentCheck struct { // AgentService represents a service known to the agent type AgentService struct { - ID string - Service string - Tags []string - Port int - Address string + ID string + Service string + Tags []string + Port int + Address string + EnableTagOverride bool } // AgentMember represents a cluster member known to the agent @@ -42,13 +43,14 @@ type AgentMember struct { // AgentServiceRegistration is used to register a new service type AgentServiceRegistration struct { - ID string `json:",omitempty"` - Name string `json:",omitempty"` - Tags []string `json:",omitempty"` - Port int `json:",omitempty"` - Address string `json:",omitempty"` - Check *AgentServiceCheck - Checks AgentServiceChecks + ID string `json:",omitempty"` + Name string `json:",omitempty"` + Tags []string `json:",omitempty"` + Port int `json:",omitempty"` + Address string `json:",omitempty"` + EnableTagOverride bool `json:",omitempty"` + Check *AgentServiceCheck + Checks AgentServiceChecks } // AgentCheckRegistration is used to register a new check @@ -196,23 +198,43 @@ func (a *Agent) ServiceDeregister(serviceID string) error { return nil } -// PassTTL is used to set a TTL check to the passing state +// PassTTL is used to set a TTL check to the passing state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. func (a *Agent) PassTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "pass") + return a.updateTTL(checkID, note, "pass") } -// WarnTTL is used to set a TTL check to the warning state +// WarnTTL is used to set a TTL check to the warning state. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. func (a *Agent) WarnTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "warn") + return a.updateTTL(checkID, note, "warn") } -// FailTTL is used to set a TTL check to the failing state +// FailTTL is used to set a TTL check to the failing state. 
+// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 or changed to use +// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9. func (a *Agent) FailTTL(checkID, note string) error { - return a.UpdateTTL(checkID, note, "fail") + return a.updateTTL(checkID, note, "fail") } -// UpdateTTL is used to update the TTL of a check -func (a *Agent) UpdateTTL(checkID, note, status string) error { +// updateTTL is used to update the TTL of a check. This is the internal +// method that uses the old API that's present in Consul versions prior to +// 0.6.4. Since Consul didn't have an analogous "update" API before it seemed +// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below, +// but keep the old Pass/Warn/Fail methods using the old API under the hood. +// +// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL(). +// The client interface will be removed in 0.8 and the server endpoints will +// be removed in 0.9. +func (a *Agent) updateTTL(checkID, note, status string) error { switch status { case "pass": case "warn": @@ -231,6 +253,51 @@ func (a *Agent) UpdateTTL(checkID, note, status string) error { return nil } +// checkUpdate is the payload for a PUT for a check update. +type checkUpdate struct { + // Status is one of the api.Health* states: HealthPassing + // ("passing"), HealthWarning ("warning"), or HealthCritical + // ("critical"). + Status string + + // Output is the information to post to the UI for operators as the + // output of the process that decided to hit the TTL check. This is + // different from the note field that's associated with the check + // itself. + Output string +} + +// UpdateTTL is used to update the TTL of a check. This uses the newer API +// that was introduced in Consul 0.6.4 and later. We translate the old status +// strings for compatibility (though a newer version of Consul will still be +// required to use this API). +func (a *Agent) UpdateTTL(checkID, output, status string) error { + switch status { + case "pass", HealthPassing: + status = HealthPassing + case "warn", HealthWarning: + status = HealthWarning + case "fail", HealthCritical: + status = HealthCritical + default: + return fmt.Errorf("Invalid status: %s", status) + } + + endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID) + r := a.c.newRequest("PUT", endpoint) + r.obj = &checkUpdate{ + Status: status, + Output: output, + } + + _, resp, err := requireOK(a.c.doRequest(r)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + // CheckRegister is used to register a new check with // the local agent func (a *Agent) CheckRegister(check *AgentCheckRegistration) error { diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 0a2a76e5d..590b858e1 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -3,9 +3,11 @@ package api import ( "bytes" "crypto/tls" + "crypto/x509" "encoding/json" "fmt" "io" + "io/ioutil" "log" "net" "net/http" @@ -122,12 +124,58 @@ type Config struct { Token string } -// DefaultConfig returns a default configuration for the client +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. +type TLSConfig struct { + // Address is the optional address of the Consul server. 
The port, if any + // will be removed from here and this will be set to the ServerName of the + // resulting config. + Address string + + // CAFile is the optional path to the CA certificate used for Consul + // communication, defaults to the system bundle if not specified. + CAFile string + + // CertFile is the optional path to the certificate for Consul + // communication. If this is set then you need to also set KeyFile. + CertFile string + + // KeyFile is the optional path to the private key for Consul communication. + // If this is set then you need to also set CertFile. + KeyFile string + + // InsecureSkipVerify if set to true will disable TLS host verification. + InsecureSkipVerify bool +} + +// DefaultConfig returns a default configuration for the client. By default this +// will pool and reuse idle connections to Consul. If you have a long-lived +// client object, this is the desired behavior and should make the most efficient +// use of the connections to Consul. If you don't reuse a client object , which +// is not recommended, then you may notice idle connections building up over +// time. To avoid this, use the DefaultNonPooledConfig() instead. func DefaultConfig() *Config { + return defaultConfig(cleanhttp.DefaultPooledTransport) +} + +// DefaultNonPooledConfig returns a default configuration for the client which +// does not pool connections. This isn't a recommended configuration because it +// will reconnect to Consul on every request, but this is useful to avoid the +// accumulation of idle connections if you make many client objects during the +// lifetime of your application. +func DefaultNonPooledConfig() *Config { + return defaultConfig(cleanhttp.DefaultTransport) +} + +// defaultConfig returns the default configuration for the client, using the +// given function to make the transport. +func defaultConfig(transportFn func() *http.Transport) *Config { config := &Config{ - Address: "127.0.0.1:8500", - Scheme: "http", - HttpClient: cleanhttp.DefaultClient(), + Address: "127.0.0.1:8500", + Scheme: "http", + HttpClient: &http.Client{ + Transport: transportFn(), + }, } if addr := os.Getenv("CONSUL_HTTP_ADDR"); addr != "" { @@ -172,10 +220,19 @@ func DefaultConfig() *Config { } if !doVerify { - transport := cleanhttp.DefaultTransport() - transport.TLSClientConfig = &tls.Config{ + tlsClientConfig, err := SetupTLSConfig(&TLSConfig{ InsecureSkipVerify: true, + }) + + // We don't expect this to fail given that we aren't + // parsing any of the input, but we panic just in case + // since this doesn't have an error return. + if err != nil { + panic(err) } + + transport := transportFn() + transport.TLSClientConfig = tlsClientConfig config.HttpClient.Transport = transport } } @@ -183,6 +240,50 @@ func DefaultConfig() *Config { return config } +// TLSConfig is used to generate a TLSClientConfig that's useful for talking to +// Consul using TLS. 
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) { + tlsClientConfig := &tls.Config{ + InsecureSkipVerify: tlsConfig.InsecureSkipVerify, + } + + if tlsConfig.Address != "" { + server := tlsConfig.Address + hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]") + if hasPort { + var err error + server, _, err = net.SplitHostPort(server) + if err != nil { + return nil, err + } + } + tlsClientConfig.ServerName = server + } + + if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" { + tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile) + if err != nil { + return nil, err + } + tlsClientConfig.Certificates = []tls.Certificate{tlsCert} + } + + if tlsConfig.CAFile != "" { + data, err := ioutil.ReadFile(tlsConfig.CAFile) + if err != nil { + return nil, fmt.Errorf("failed to read CA file: %v", err) + } + + caPool := x509.NewCertPool() + if !caPool.AppendCertsFromPEM(data) { + return nil, fmt.Errorf("failed to parse CA certificate") + } + tlsClientConfig.RootCAs = caPool + } + + return tlsClientConfig, nil +} + // Client provides a client to the Consul API type Client struct { config Config diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go index cf64bd909..52a00b304 100644 --- a/vendor/github.com/hashicorp/consul/api/catalog.go +++ b/vendor/github.com/hashicorp/consul/api/catalog.go @@ -6,13 +6,14 @@ type Node struct { } type CatalogService struct { - Node string - Address string - ServiceID string - ServiceName string - ServiceAddress string - ServiceTags []string - ServicePort int + Node string + Address string + ServiceID string + ServiceName string + ServiceAddress string + ServiceTags []string + ServicePort int + ServiceEnableTagOverride bool } type CatalogNode struct { diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go index 1a273e087..5bb403f55 100644 --- a/vendor/github.com/hashicorp/consul/api/health.go +++ b/vendor/github.com/hashicorp/consul/api/health.go @@ -4,6 +4,16 @@ import ( "fmt" ) +const ( + // HealthAny is special, and is used as a wild card, + // not as a specific state. + HealthAny = "any" + HealthUnknown = "unknown" + HealthPassing = "passing" + HealthWarning = "warning" + HealthCritical = "critical" +) + // HealthCheck is used to represent a single check type HealthCheck struct { Node string @@ -85,7 +95,7 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) r.params.Set("tag", tag) } if passingOnly { - r.params.Set("passing", "1") + r.params.Set(HealthPassing, "1") } rtt, resp, err := requireOK(h.c.doRequest(r)) if err != nil { @@ -108,11 +118,11 @@ func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) // The wildcard "any" state can also be used for all checks. 
func (h *Health) State(state string, q *QueryOptions) ([]*HealthCheck, *QueryMeta, error) { switch state { - case "any": - case "warning": - case "critical": - case "passing": - case "unknown": + case HealthAny: + case HealthWarning: + case HealthCritical: + case HealthPassing: + case HealthUnknown: default: return nil, nil, fmt.Errorf("Unsupported state: %v", state) } diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go index 688b3a09d..3dac2583c 100644 --- a/vendor/github.com/hashicorp/consul/api/kv.go +++ b/vendor/github.com/hashicorp/consul/api/kv.go @@ -23,6 +23,43 @@ type KVPair struct { // KVPairs is a list of KVPair objects type KVPairs []*KVPair +// KVOp constants give possible operations available in a KVTxn. +type KVOp string + +const ( + KVSet KVOp = "set" + KVDelete = "delete" + KVDeleteCAS = "delete-cas" + KVDeleteTree = "delete-tree" + KVCAS = "cas" + KVLock = "lock" + KVUnlock = "unlock" + KVGet = "get" + KVGetTree = "get-tree" + KVCheckSession = "check-session" + KVCheckIndex = "check-index" +) + +// KVTxnOp defines a single operation inside a transaction. +type KVTxnOp struct { + Verb string + Key string + Value []byte + Flags uint64 + Index uint64 + Session string +} + +// KVTxnOps defines a set of operations to be performed inside a single +// transaction. +type KVTxnOps []*KVTxnOp + +// KVTxnResponse has the outcome of a transaction. +type KVTxnResponse struct { + Results []*KVPair + Errors TxnErrors +} + // KV is used to manipulate the K/V API type KV struct { c *Client @@ -238,3 +275,122 @@ func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOption res := strings.Contains(string(buf.Bytes()), "true") return res, qm, nil } + +// TxnOp is the internal format we send to Consul. It's not specific to KV, +// though currently only KV operations are supported. +type TxnOp struct { + KV *KVTxnOp +} + +// TxnOps is a list of transaction operations. +type TxnOps []*TxnOp + +// TxnResult is the internal format we receive from Consul. +type TxnResult struct { + KV *KVPair +} + +// TxnResults is a list of TxnResult objects. +type TxnResults []*TxnResult + +// TxnError is used to return information about an operation in a transaction. +type TxnError struct { + OpIndex int + What string +} + +// TxnErrors is a list of TxnError objects. +type TxnErrors []*TxnError + +// TxnResponse is the internal format we receive from Consul. +type TxnResponse struct { + Results TxnResults + Errors TxnErrors +} + +// Txn is used to apply multiple KV operations in a single, atomic transaction. +// +// Note that Go will perform the required base64 encoding on the values +// automatically because the type is a byte slice. Transactions are defined as a +// list of operations to perform, using the KVOp constants and KVTxnOp structure +// to define operations. If any operation fails, none of the changes are applied +// to the state store. Note that this hides the internal raw transaction interface +// and munges the input and output types into KV-specific ones for ease of use. +// If there are more non-KV operations in the future we may break out a new +// transaction API client, but it will be easy to keep this KV-specific variant +// supported. +// +// Even though this is generally a write operation, we take a QueryOptions input +// and return a QueryMeta output. 
If the transaction contains only read ops, then +// Consul will fast-path it to a different endpoint internally which supports +// consistency controls, but not blocking. If there are write operations then +// the request will always be routed through raft and any consistency settings +// will be ignored. +// +// Here's an example: +// +// ops := KVTxnOps{ +// &KVTxnOp{ +// Verb: KVLock, +// Key: "test/lock", +// Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e", +// Value: []byte("hello"), +// }, +// &KVTxnOp{ +// Verb: KVGet, +// Key: "another/key", +// }, +// } +// ok, response, _, err := kv.Txn(&ops, nil) +// +// If there is a problem making the transaction request then an error will be +// returned. Otherwise, the ok value will be true if the transaction succeeded +// or false if it was rolled back. The response is a structured return value which +// will have the outcome of the transaction. Its Results member will have entries +// for each operation. Deleted keys will have a nil entry in the, and to save +// space, the Value of each key in the Results will be nil unless the operation +// is a KVGet. If the transaction was rolled back, the Errors member will have +// entries referencing the index of the operation that failed along with an error +// message. +func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) { + r := k.c.newRequest("PUT", "/v1/txn") + r.setQueryOptions(q) + + // Convert into the internal format since this is an all-KV txn. + ops := make(TxnOps, 0, len(txn)) + for _, kvOp := range txn { + ops = append(ops, &TxnOp{KV: kvOp}) + } + r.obj = ops + rtt, resp, err := k.c.doRequest(r) + if err != nil { + return false, nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict { + var txnResp TxnResponse + if err := decodeBody(resp, &txnResp); err != nil { + return false, nil, nil, err + } + + // Convert from the internal format. + kvResp := KVTxnResponse{ + Errors: txnResp.Errors, + } + for _, result := range txnResp.Results { + kvResp.Results = append(kvResp.Results, result.KV) + } + return resp.StatusCode == http.StatusOK, &kvResp, qm, nil + } + + var buf bytes.Buffer + if _, err := io.Copy(&buf, resp.Body); err != nil { + return false, nil, nil, fmt.Errorf("Failed to read response: %v", err) + } + return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String()) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index b5498dbdc..9b21d0098 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -890,9 +890,11 @@ "revisionTime": "2016-07-26T16:33:11Z" }, { + "checksumSHA1": "glOabn8rkJvz7tjz/xfX4lmt070=", "comment": "v0.6.3-28-g3215b87", "path": "github.com/hashicorp/consul/api", - "revision": "3215b8727f44c778dd7045dcfd5ac42735c581a9" + "revision": "d4a8a43d2b600e662a50a75be70daed5fad8dd2d", + "revisionTime": "2016-06-04T06:35:46Z" }, { "path": "github.com/hashicorp/errwrap", diff --git a/website/source/docs/providers/consul/index.html.markdown b/website/source/docs/providers/consul/index.html.markdown index 793b1fc30..1d39f95a3 100644 --- a/website/source/docs/providers/consul/index.html.markdown +++ b/website/source/docs/providers/consul/index.html.markdown @@ -46,4 +46,8 @@ The following arguments are supported: * `address` - (Optional) The HTTP(S) API address of the agent to use. Defaults to "127.0.0.1:8500". 
* `scheme` - (Optional) The URL scheme of the agent to use ("http" or "https"). Defaults to "http". * `datacenter` - (Optional) The datacenter to use. Defaults to that of the agent. +* `token` - (Optional) The ACL token to use by default when making requests to the agent. +* `ca_file` - (Optional) A path to a PEM-encoded certificate authority used to verify the remote agent's certificate. +* `cert_file` - (Optional) A path to a PEM-encoded certificate provided to the remote agent; requires use of `key_file`. +* `key_file`- (Optional) A path to a PEM-encoded private key, required if `cert_file` is specified. From f8e4166beee9c46d49f93157e91676fc00f9e2c7 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 12 Aug 2016 12:24:21 +1000 Subject: [PATCH 0683/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b71220bbd..09e02e2a7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,6 +29,7 @@ IMPROVEMENTS * provider/openstack: Support pdating the External Gateway assigned to a Neutron router [GH-8070] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] + * provider/consul: add tls config support to consul provider [GH-7015] * remote/consul: Support setting datacenter when using consul remote state [GH-8102] * provider/google: Support Import of `google_compute_target_pool` [GH-8133] * provider/google: Support Import of `google_compute_forwarding_rule` [GH-8122] From 6ff3df8552e1e5f8eb270f21b50fc6d318060759 Mon Sep 17 00:00:00 2001 From: KOJIMA Kazunori Date: Fri, 12 Aug 2016 11:34:27 +0900 Subject: [PATCH 0684/1238] Fix invalid reference in iam_policy_document document page. (#8151) --- .../docs/providers/aws/d/iam_policy_document.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/docs/providers/aws/d/iam_policy_document.html.markdown b/website/source/docs/providers/aws/d/iam_policy_document.html.markdown index f2e01fe13..ac8a2de3f 100644 --- a/website/source/docs/providers/aws/d/iam_policy_document.html.markdown +++ b/website/source/docs/providers/aws/d/iam_policy_document.html.markdown @@ -60,7 +60,7 @@ data "aws_iam_policy_document" "example" { resource "aws_iam_policy" "example" { name = "example_policy" path = "/" - policy = "${data.aws_iam_policy.example.json}" + policy = "${data.aws_iam_policy_document.example.json}" } ``` From 93b2c715447d0e52f27c1b4ed8df3c1e86762f84 Mon Sep 17 00:00:00 2001 From: Evan Brown Date: Thu, 11 Aug 2016 19:35:33 -0700 Subject: [PATCH 0685/1238] providers/google: Add google_compute_image resource (#7960) * providers/google: Add google_compute_image resource This change introduces the google_compute_image resource, which allows Terraform users to create a bootable VM image from a raw disk tarball stored in Google Cloud Storage. The google_compute_image resource may be referenced as a boot image for a google_compute_instance. 
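A minimal, illustrative configuration of the new resource (the resource label, image name, and
GCS tarball URL below are placeholders, not taken from this change; the argument names follow
the schema added in resource_compute_image.go) might look like:

    resource "google_compute_image" "example" {
      # Placeholder name; must be unique within the project
      name   = "example-image"

      # Optional image family, also introduced by this commit
      family = "example-family"

      raw_disk {
        # Placeholder URL; must point at a raw disk tarball stored in Google Cloud Storage
        source = "https://storage.googleapis.com/example-bucket/example-disk.tar.gz"

        # container_type defaults to "TAR"; sha1 may optionally be set to verify the upload
      }
    }

The image's name (or its computed self_link) can then be referenced as the boot image of a
google_compute_instance.
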
* providers/google: Support family property in google_compute_image * provider/google: Idiomatic checking for presence of config val * vendor: Update Google client libraries --- builtin/providers/google/provider.go | 1 + .../google/resource_compute_image.go | 176 ++ .../google/resource_compute_image_test.go | 85 + .../api/compute/v1/compute-api.json | 678 ++++--- .../api/compute/v1/compute-gen.go | 1739 +++++++++-------- .../api/container/v1/container-api.json | 281 ++- .../api/container/v1/container-gen.go | 857 +++++++- .../google.golang.org/api/dns/v1/dns-api.json | 6 +- .../google.golang.org/api/dns/v1/dns-gen.go | 149 +- .../api/googleapi/googleapi.go | 10 +- .../api/pubsub/v1/pubsub-api.json | 32 +- .../api/pubsub/v1/pubsub-gen.go | 388 ++-- .../api/sqladmin/v1beta4/sqladmin-api.json | 68 +- .../api/sqladmin/v1beta4/sqladmin-gen.go | 773 +++++--- .../api/storage/v1/storage-api.json | 14 +- .../api/storage/v1/storage-gen.go | 16 +- vendor/vendor.json | 40 +- .../google/r/compute_image.html.markdown | 77 + 18 files changed, 3575 insertions(+), 1815 deletions(-) create mode 100644 builtin/providers/google/resource_compute_image.go create mode 100644 builtin/providers/google/resource_compute_image_test.go create mode 100644 website/source/docs/providers/google/r/compute_image.html.markdown diff --git a/builtin/providers/google/provider.go b/builtin/providers/google/provider.go index 40b2ebe4f..f04b5b222 100644 --- a/builtin/providers/google/provider.go +++ b/builtin/providers/google/provider.go @@ -67,6 +67,7 @@ func Provider() terraform.ResourceProvider { "google_compute_global_forwarding_rule": resourceComputeGlobalForwardingRule(), "google_compute_http_health_check": resourceComputeHttpHealthCheck(), "google_compute_https_health_check": resourceComputeHttpsHealthCheck(), + "google_compute_image": resourceComputeImage(), "google_compute_instance": resourceComputeInstance(), "google_compute_instance_group": resourceComputeInstanceGroup(), "google_compute_instance_group_manager": resourceComputeInstanceGroupManager(), diff --git a/builtin/providers/google/resource_compute_image.go b/builtin/providers/google/resource_compute_image.go new file mode 100644 index 000000000..7aee85023 --- /dev/null +++ b/builtin/providers/google/resource_compute_image.go @@ -0,0 +1,176 @@ +package google + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "google.golang.org/api/compute/v1" + "google.golang.org/api/googleapi" +) + +func resourceComputeImage() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeImageCreate, + Read: resourceComputeImageRead, + Delete: resourceComputeImageDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "description": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "family": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "project": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "raw_disk": &schema.Schema{ + Type: schema.TypeList, + Required: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "source": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "sha1": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "container_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "TAR", + ForceNew: 
true, + }, + }, + }, + }, + + "self_link": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeImageCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Build the image + image := &compute.Image{ + Name: d.Get("name").(string), + } + + if v, ok := d.GetOk("description"); ok { + image.Description = v.(string) + } + + if v, ok := d.GetOk("family"); ok { + image.Family = v.(string) + } + + rawDiskEle := d.Get("raw_disk").([]interface{})[0].(map[string]interface{}) + imageRawDisk := &compute.ImageRawDisk{ + Source: rawDiskEle["source"].(string), + ContainerType: rawDiskEle["container_type"].(string), + } + if val, ok := rawDiskEle["sha1"]; ok { + imageRawDisk.Sha1Checksum = val.(string) + } + image.RawDisk = imageRawDisk + + // Insert the image + op, err := config.clientCompute.Images.Insert( + project, image).Do() + if err != nil { + return fmt.Errorf("Error creating image: %s", err) + } + + // Store the ID + d.SetId(image.Name) + + err = computeOperationWaitGlobal(config, op, project, "Creating Image") + if err != nil { + return err + } + + return resourceComputeImageRead(d, meta) +} + +func resourceComputeImageRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + image, err := config.clientCompute.Images.Get( + project, d.Id()).Do() + if err != nil { + if gerr, ok := err.(*googleapi.Error); ok && gerr.Code == 404 { + // The resource doesn't exist anymore + log.Printf("[WARN] Removing Image %q because it's gone", d.Get("name").(string)) + d.SetId("") + + return nil + } + + return fmt.Errorf("Error reading image: %s", err) + } + + d.Set("self_link", image.SelfLink) + + return nil +} + +func resourceComputeImageDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + + project, err := getProject(d, config) + if err != nil { + return err + } + + // Delete the image + log.Printf("[DEBUG] image delete request") + op, err := config.clientCompute.Images.Delete( + project, d.Id()).Do() + if err != nil { + return fmt.Errorf("Error deleting image: %s", err) + } + + err = computeOperationWaitGlobal(config, op, project, "Deleting image") + if err != nil { + return err + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/google/resource_compute_image_test.go b/builtin/providers/google/resource_compute_image_test.go new file mode 100644 index 000000000..e5708c44b --- /dev/null +++ b/builtin/providers/google/resource_compute_image_test.go @@ -0,0 +1,85 @@ +package google + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "google.golang.org/api/compute/v1" +) + +func TestAccComputeImage_basic(t *testing.T) { + var image compute.Image + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeImageDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeImage_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeImageExists( + "google_compute_image.foobar", &image), + ), + }, + }, + }) +} + +func testAccCheckComputeImageDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + + for _, rs := range 
s.RootModule().Resources { + if rs.Type != "google_compute_image" { + continue + } + + _, err := config.clientCompute.Images.Get( + config.Project, rs.Primary.ID).Do() + if err == nil { + return fmt.Errorf("Image still exists") + } + } + + return nil +} + +func testAccCheckComputeImageExists(n string, image *compute.Image) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + + found, err := config.clientCompute.Images.Get( + config.Project, rs.Primary.ID).Do() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Image not found") + } + + *image = *found + + return nil + } +} + +var testAccComputeImage_basic = fmt.Sprintf(` +resource "google_compute_image" "foobar" { + name = "image-test-%s" + raw_disk { + source = "https://storage.googleapis.com/bosh-cpi-artifacts/bosh-stemcell-3262.4-google-kvm-ubuntu-trusty-go_agent-raw.tar.gz" + } +}`, acctest.RandString(10)) diff --git a/vendor/google.golang.org/api/compute/v1/compute-api.json b/vendor/google.golang.org/api/compute/v1/compute-api.json index 7b8cc894c..7f4fee625 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-api.json +++ b/vendor/google.golang.org/api/compute/v1/compute-api.json @@ -1,11 +1,11 @@ { "kind": "discovery#restDescription", - "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/eASe8C9_MmiRuqi8LLPi5_VjUnQ\"", + "etag": "\"C5oy1hgQsABtYOYIOXWcR3BgYqU/kMrJsitQJ7xtk0C0WaT4LI3dNYA\"", "discoveryVersion": "v1", "id": "compute:v1", "name": "compute", "version": "v1", - "revision": "20160617", + "revision": "20160726", "title": "Compute Engine API", "description": "Creates and runs virtual machines on Google Cloud Platform.", "ownerDomain": "google.com", @@ -94,7 +94,7 @@ "AccessConfig": { "id": "AccessConfig", "type": "object", - "description": "An access configuration attached to an instance's network interface.", + "description": "An access configuration attached to an instance's network interface. Only one access config per instance is supported.", "properties": { "kind": { "type": "string", @@ -396,7 +396,7 @@ }, "source": { "type": "string", - "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks." + "description": "Specifies a valid partial or full URL to an existing Persistent Disk resource. This field is only applicable for persistent disks. Note that for InstanceTemplate, it is just disk name, not URL for the disk." }, "type": { "type": "string", @@ -433,7 +433,7 @@ }, "diskType": { "type": "string", - "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType" + "description": "Specifies the disk type to use to create the instance. If not specified, the default is pd-standard, specified using the full URL. 
For example:\n\nhttps://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/pd-standard \n\nOther values include pd-ssd and local-ssd. If you define this field, you can provide either the full or partial URL. For example, the following are valid values: \n- https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType \n- projects/project/zones/zone/diskTypes/diskType \n- zones/zone/diskTypes/diskType Note that for InstanceTemplate, this is the name of the disk type, not URL." }, "sourceImage": { "type": "string", @@ -734,7 +734,7 @@ "properties": { "balancingMode": { "type": "string", - "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. Valid values are UTILIZATION and RATE.", + "description": "Specifies the balancing mode for this backend. For global HTTP(S) load balancing, the default is UTILIZATION. Valid values are UTILIZATION and RATE.\n\nThis cannot be used for internal load balancing.", "enum": [ "RATE", "UTILIZATION" @@ -746,7 +746,7 @@ }, "capacityScaler": { "type": "number", - "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].", + "description": "A multiplier applied to the group's maximum servicing capacity (either UTILIZATION or RATE). Default value is 1, which means the group will serve up to 100% of its configured CPU or RPS (depending on balancingMode). A setting of 0 means the group is completely drained, offering 0% of its available CPU or RPS. Valid range is [0.0,1.0].\n\nThis cannot be used for internal load balancing.", "format": "float" }, "description": { @@ -755,21 +755,21 @@ }, "group": { "type": "string", - "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL." + "description": "The fully-qualified URL of a zonal Instance Group resource. This instance group defines the list of instances that serve traffic. Member virtual machine instances from each instance group must live in the same zone as the instance group itself. No two backends in a backend service are allowed to use same Instance Group resource.\n\nNote that you must specify an Instance Group resource using the fully-qualified URL, rather than a partial URL.\n\nWhen the BackendService has load balancing scheme INTERNAL, the instance group must be in a zone within the same region as the BackendService." }, "maxRate": { "type": "integer", - "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "description": "The max requests per second (RPS) of the group. Can be used with either RATE or UTILIZATION balancing modes, but required if RATE mode. 
For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, "maxRatePerInstance": { "type": "number", - "description": "The max requests per second (RPS) that a single backend instance can handle.This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.", + "description": "The max requests per second (RPS) that a single backend instance can handle.This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.\n\nThis cannot be used for internal load balancing.", "format": "float" }, "maxUtilization": { "type": "number", - "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].", + "description": "Used when balancingMode is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0.0, 1.0].\n\nThis cannot be used for internal load balancing.", "format": "float" } } @@ -779,6 +779,11 @@ "type": "object", "description": "A BackendService resource. This resource defines a group of backend virtual machines and their serving capacity.", "properties": { + "affinityCookieTtlSec": { + "type": "integer", + "description": "Lifetime of cookies in seconds if session_affinity is GENERATED_COOKIE. If set to 0, the cookie is non-persistent and lasts only until the end of the browser session (or equivalent). The maximum allowed value for TTL is one day.\n\nWhen the load balancing scheme is INTERNAL, this field is not used.", + "format": "int32" + }, "backends": { "type": "array", "description": "The list of backends that serve this BackendService.", @@ -796,7 +801,7 @@ }, "enableCDN": { "type": "boolean", - "description": "If true, enable Cloud CDN for this BackendService." + "description": "If true, enable Cloud CDN for this BackendService.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." }, "fingerprint": { "type": "string", @@ -805,7 +810,7 @@ }, "healthChecks": { "type": "array", - "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.", + "description": "The list of URLs to the HttpHealthCheck or HttpsHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.\n\nFor internal load balancing, a URL to a HealthCheck resource must be specified instead.", "items": { "type": "string" } @@ -827,16 +832,16 @@ }, "port": { "type": "integer", - "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.", + "description": "Deprecated in favor of portName. The TCP port to connect on the backend. The default value is 80.\n\nThis cannot be used for internal load balancing.", "format": "int32" }, "portName": { "type": "string", - "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. Required." + "description": "Name of backend port. The same name should appear in the instance groups referenced by this service. 
Required when the load balancing scheme is EXTERNAL.\n\nWhen the load balancing scheme is INTERNAL, this field is not used." }, "protocol": { "type": "string", - "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL.", + "description": "The protocol this BackendService uses to communicate with backends.\n\nPossible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is HTTP.\n\nFor internal load balancing, the possible values are TCP and UDP, and the default is TCP.", "enum": [ "HTTP", "HTTPS" @@ -854,6 +859,22 @@ "type": "string", "description": "[Output Only] Server-defined URL for the resource." }, + "sessionAffinity": { + "type": "string", + "description": "Type of session affinity to use. The default is NONE.\n\nWhen the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, or GENERATED_COOKIE.\n\nWhen the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO.\n\nWhen the protocol is UDP, this field is not used.", + "enum": [ + "CLIENT_IP", + "CLIENT_IP_PROTO", + "GENERATED_COOKIE", + "NONE" + ], + "enumDescriptions": [ + "", + "", + "", + "" + ] + }, "timeoutSec": { "type": "integer", "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.", @@ -1021,7 +1042,7 @@ }, "licenses": { "type": "array", - "description": "[Output Only] Any applicable publicly visible licenses.", + "description": "Any applicable publicly visible licenses.", "items": { "type": "string" } @@ -1469,17 +1490,17 @@ "properties": { "allowed": { "type": "array", - "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", + "description": "The list of ALLOW rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.", "items": { "type": "object", "properties": { "IPProtocol": { "type": "string", - "description": "The IP protocol that is allowed for this rule. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." + "description": "The IP protocol to which this rule applies. The protocol type is required when creating a firewall rule. This value can either be one of the following well known protocol strings (tcp, udp, icmp, esp, ah, sctp), or the IP protocol number." }, "ports": { "type": "array", - "description": "An optional list of ports which are allowed. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, connections through any port are allowed\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", + "description": "An optional list of ports to which this rule applies. This field is only applicable for UDP or TCP protocol. Each entry must be either an integer or a range. If not specified, this rule applies to connections through any port.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"], and [\"12345-12349\"].", "items": { "type": "string" } @@ -1526,14 +1547,14 @@ }, "sourceRanges": { "type": "array", - "description": "The IP address blocks that this rule applies to, expressed in CIDR format. 
One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.", + "description": "If source ranges are specified, the firewall will apply only to traffic that has source IP address in these ranges. These ranges must be expressed in CIDR format. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", "items": { "type": "string" } }, "sourceTags": { "type": "array", - "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set.\n\nIf both properties are set, an inbound connection is allowed if the range matches the sourceRanges OR the tag of the source matches the sourceTags property. The connection does not need to match both properties.\n\nSource tags cannot be used to allow access to an instance's external IP address. Because tags are associated with an instance, not an IP address, source tags can only be used to control traffic traveling from an instance inside the same network as the firewall.", + "description": "If source tags are specified, the firewall will apply only to traffic with source IP that belongs to a tag listed in source tags. Source tags cannot be used to control traffic to an instance's external IP address. Because tags are associated with an instance, not an IP address. One or both of sourceRanges and sourceTags may be set. If both properties are set, the firewall will apply to traffic that has source IP address within sourceRanges OR the source IP that belongs to a tag listed in the sourceTags property. The connection does not need to match both properties for the firewall to apply.", "items": { "type": "string" } @@ -1585,11 +1606,11 @@ "properties": { "IPAddress": { "type": "string", - "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned." + "description": "The IP address that this forwarding rule is serving on behalf of.\n\nFor global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. By default, this field is empty and an ephemeral IP from the same scope (global or regional) will be assigned.\n\nWhen the load balancing scheme is INTERNAL, this can only be an RFC 1918 IP address belonging to the network/subnetwork configured for the forwarding rule. A reserved address cannot be used. If the field is empty, the IP address will be automatically allocated from the internal IP range of the subnetwork or network configured for this forwarding rule." }, "IPProtocol": { "type": "string", - "description": "The IP protocol to which this rule applies. Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.", + "description": "The IP protocol to which this rule applies. 
Valid options are TCP, UDP, ESP, AH, SCTP or ICMP.\n\nWhen the load balancing scheme is INTERNAL\u003c/code, only TCP and UDP are valid.", "enum": [ "AH", "ESP", @@ -1630,7 +1651,7 @@ }, "portRange": { "type": "string", - "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges." + "description": "Applicable only when IPProtocol is TCP, UDP, or SCTP, only packets addressed to ports in the specified range will be forwarded to target. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges.\n\nThis field is not used for internal load balancing." }, "region": { "type": "string", @@ -1642,7 +1663,7 @@ }, "target": { "type": "string", - "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic." + "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy or TargetHttpsProxy resource. The forwarded traffic must be of a type appropriate to the target object. For example, TargetHttpProxy requires HTTP traffic, and TargetHttpsProxy requires HTTPS traffic.\n\nThis field is not used for internal load balancing." } } }, @@ -2074,7 +2095,7 @@ }, "family": { "type": "string", - "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated." + "description": "The name of the image family to which this image belongs. You can create disks by specifying an image family instead of a specific image name. The image family always returns its latest image that is not deprecated. The name of the image family must comply with RFC1035." }, "id": { "type": "string", @@ -2268,7 +2289,7 @@ }, "networkInterfaces": { "type": "array", - "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet.", + "description": "An array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Only one interface is supported per instance.", "items": { "$ref": "NetworkInterface" } @@ -2489,7 +2510,7 @@ "InstanceGroupManager": { "id": "InstanceGroupManager", "type": "object", - "description": "An Instance Template Manager resource.", + "description": "An Instance Group Manager resource.", "properties": { "baseInstanceName": { "type": "string", @@ -2591,7 +2612,7 @@ }, "creating": { "type": "integer", - "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. 
If the group fails to create one of these instances, it tries again until it creates the instance successfully.", + "description": "[Output Only] The number of instances in the managed instance group that are scheduled to be created or are currently being created. If the group fails to create any of these instances, it tries again until it creates the instance successfully.\n\nIf you have disabled creation retries, this field will not be populated; instead, the creatingWithoutRetries field will be populated.", "format": "int32" }, "deleting": { @@ -2860,7 +2881,7 @@ "properties": { "id": { "type": "string", - "description": "[Output Only] A unique identifier for this list of instance groups. The server generates this identifier." + "description": "[Output Only] A unique identifier for this list of instances in the specified instance group. The server generates this identifier." }, "items": { "type": "array", @@ -2871,7 +2892,7 @@ }, "kind": { "type": "string", - "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for lists of instance groups.", + "description": "[Output Only] The resource type, which is always compute#instanceGroupsListInstances for the list of instances in the specified instance group.", "default": "compute#instanceGroupsListInstances" }, "nextPageToken": { @@ -2880,7 +2901,7 @@ }, "selfLink": { "type": "string", - "description": "[Output Only] The URL for this list of instance groups. The server generates this URL." + "description": "[Output Only] The URL for this list of instances in the specified instance groups. The server generates this URL." } } }, @@ -3600,7 +3621,7 @@ "properties": { "currentAction": { "type": "string", - "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. If the group fails to create this instance, it does not try again and the group's target_size value is decreased. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", + "description": "[Output Only] The current action that the managed instance group has scheduled for the instance. Possible values: \n- NONE The instance is running, and the managed instance group does not have any scheduled actions for this instance. \n- CREATING The managed instance group is creating this instance. If the group fails to create this instance, it will try again until it is successful. \n- CREATING_WITHOUT_RETRIES The managed instance group is attempting to create this instance only once. 
If the group fails to create this instance, it does not try again and the group's targetSize value is decreased instead. \n- RECREATING The managed instance group is recreating this instance. \n- DELETING The managed instance group is permanently deleting this instance. \n- ABANDONING The managed instance group is abandoning this instance. The instance will be removed from the instance group and from any target pools that are associated with this group. \n- RESTARTING The managed instance group is restarting the instance. \n- REFRESHING The managed instance group is applying configuration changes to the instance without stopping it. For example, the group can update the target pool list for an instance without stopping that instance.", "enum": [ "ABANDONING", "CREATING", @@ -3822,7 +3843,7 @@ "properties": { "accessConfigs": { "type": "array", - "description": "An array of configurations for this interface. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.", + "description": "An array of configurations for this interface. Currently, only one access config, ONE_TO_ONE_NAT, is supported. If there are no accessConfigs specified, then this instance will have no external internet access.", "items": { "$ref": "AccessConfig" } @@ -4716,10 +4737,12 @@ "description": "Router resource.", "properties": { "bgp": { - "$ref": "RouterBgp" + "$ref": "RouterBgp", + "description": "BGP information specific to this router." }, "bgpPeers": { "type": "array", + "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Please refer to RFC4273.", "items": { "$ref": "RouterBgpPeer" } @@ -4739,6 +4762,7 @@ }, "interfaces": { "type": "array", + "description": "Router interfaces. Each interface requires either one linked resource (e.g. linkedVpnTunnel) or IP address and IP address range (e.g. ipRange).", "items": { "$ref": "RouterInterface" } @@ -4823,7 +4847,6 @@ "RouterBgpPeer": { "id": "RouterBgpPeer", "type": "object", - "description": "BGP information that needs to be configured into the routing stack to establish the BGP peering. It must specify peer ASN and either interface name, IP, or peer IP. Reference: https://tools.ietf.org/html/rfc4273", "properties": { "advertisedRoutePriority": { "type": "integer", @@ -4857,7 +4880,6 @@ "RouterInterface": { "id": "RouterInterface", "type": "object", - "description": "Router interfaces. Each interface requires either one linked resource (e.g. linked_vpn_tunnel) or IP address + range (specified in ip_range).", "properties": { "ipRange": { "type": "string", @@ -5002,6 +5024,16 @@ } } }, + "RoutersPreviewResponse": { + "id": "RoutersPreviewResponse", + "type": "object", + "properties": { + "resource": { + "$ref": "Router", + "description": "Preview of given router." + } + } + }, "RoutersScopedList": { "id": "RoutersScopedList", "type": "object", @@ -5970,9 +6002,11 @@ "enum": [ "CLIENT_IP", "CLIENT_IP_PROTO", + "GENERATED_COOKIE", "NONE" ], "enumDescriptions": [ + "", "", "", "" @@ -6984,7 +7018,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7005,7 +7039,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7038,7 +7072,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -7079,7 +7113,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -7114,7 +7148,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -7148,7 +7182,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7169,7 +7203,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -7205,7 +7239,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7226,7 +7260,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7259,7 +7293,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7300,7 +7334,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7335,7 +7369,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7369,7 +7403,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
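The description strings rewritten throughout these hunks all document the same filter grammar: field_name comparison_string literal_string, with eq/ne comparisons, RE2 string literals that must match the whole field, and parenthesized expressions combined as AND. As a hedged sketch only (not part of the patch; the project, zone, and endpoint URL below are illustrative placeholders), such a filter could be composed and attached as the filter query parameter using just the Go standard library:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Two parenthesized expressions are ANDed by the API: instances with
	// automatic restart enabled that live in zone us-central1-f.
	filter := "(scheduling.automaticRestart eq true) (zone eq us-central1-f)"

	// List calls take the expression as the "filter" query parameter.
	q := url.Values{}
	q.Set("filter", filter)
	q.Set("maxResults", "50")

	// Placeholder endpoint for illustration; real calls would go through a client library.
	endpoint := "https://www.googleapis.com/compute/v1/projects/my-project/zones/us-central1-f/instances"
	fmt.Println(endpoint + "?" + q.Encode())
}

The printed URL simply shows the encoded filter string; in practice a generated API client would set the same parameter on a list call.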
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7390,7 +7424,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7431,7 +7465,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7474,7 +7508,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7521,7 +7555,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7554,7 +7588,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7587,7 +7621,7 @@ "project": { "type": "string", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7617,7 +7651,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7643,7 +7677,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7664,7 +7698,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7697,7 +7731,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7733,7 +7767,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7764,7 +7798,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7785,7 +7819,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7818,7 +7852,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7851,7 +7885,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7872,7 +7906,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -7908,7 +7942,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -7929,7 +7963,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -7962,7 +7996,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -8006,7 +8040,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -8047,7 +8081,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -8082,7 +8116,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sourceImage": { @@ -8121,7 +8155,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8142,7 +8176,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -8183,7 +8217,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -8231,7 +8265,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8264,7 +8298,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8291,7 +8325,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8317,7 +8351,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. 
For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8338,7 +8372,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8371,7 +8405,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8407,7 +8441,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8438,7 +8472,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
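The pattern hunks, in turn, all make one change to the project-ID regex: the final alternative now begins with [a-z0-9] rather than [a-z], so a bare project name may start with a digit. A small sketch comparing the two patterns with Go's regexp package (the sample IDs are made up for illustration):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Old and new project patterns taken from the hunks above, anchored for a full match.
	oldPat := regexp.MustCompile(`^(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))$`)
	newPat := regexp.MustCompile(`^(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))$`)

	for _, id := range []string{"my-project", "0cafe-project", "123456789"} {
		fmt.Printf("%-15s old=%v new=%v\n", id, oldPat.MatchString(id), newPat.MatchString(id))
	}
}

Under the old pattern an ID such as 0cafe-project is rejected; under the new one it is accepted, while purely numeric IDs continue to match the [0-9]{1,19} branch in both.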
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8459,7 +8493,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8492,7 +8526,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8533,7 +8567,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8568,7 +8602,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8602,7 +8636,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8623,7 +8657,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8664,7 +8698,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -8712,7 +8746,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8745,7 +8779,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8772,7 +8806,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8798,7 +8832,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8819,7 +8853,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8856,7 +8890,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8889,7 +8923,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8916,7 +8950,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8942,7 +8976,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -8963,7 +8997,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -8996,7 +9030,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9027,7 +9061,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9048,7 +9082,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9081,7 +9115,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9111,7 +9145,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9136,7 +9170,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9157,7 +9191,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9194,7 +9228,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9227,7 +9261,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9254,7 +9288,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9280,7 +9314,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9301,7 +9335,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9334,7 +9368,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9370,7 +9404,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9410,7 +9444,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9443,7 +9477,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9470,7 +9504,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9496,7 +9530,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9517,7 +9551,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9550,7 +9584,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9586,7 +9620,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9626,7 +9660,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9659,7 +9693,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9695,7 +9729,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9720,7 +9754,7 @@ "parameters": { "family": { "type": "string", - "description": "Name of the image resource to return.", + "description": "Name of the image family to search for.", "required": true, "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", "location": "path" @@ -9729,7 +9763,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9756,7 +9790,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9785,7 +9819,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9806,7 +9840,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9842,7 +9876,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9876,7 +9910,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -9897,7 +9931,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -9929,7 +9963,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -9968,7 +10002,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10010,7 +10044,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10044,7 +10078,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10077,7 +10111,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10098,7 +10132,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10137,7 +10171,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10177,7 +10211,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10219,7 +10253,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "size": { @@ -10266,7 +10300,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10308,7 +10342,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10354,7 +10388,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10388,7 +10422,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). 
The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10409,7 +10443,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10441,7 +10475,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10480,7 +10514,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10514,7 +10548,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10547,7 +10581,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10568,7 +10602,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10599,7 +10633,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "instanceGroup": { @@ -10626,7 +10660,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10669,7 +10703,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10711,7 +10745,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10758,7 +10792,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10791,7 +10825,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10818,7 +10852,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10844,7 +10878,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10865,7 +10899,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10908,7 +10942,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -10944,7 +10978,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -10965,7 +10999,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -10998,7 +11032,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11042,7 +11076,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11095,7 +11129,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11131,7 +11165,6 @@ "type": "string", "description": "Disk device name to detach.", "required": true, - "pattern": "\\w[\\w.-]{0,254}", "location": "query" }, "instance": { @@ -11145,7 +11178,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11187,7 +11220,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11238,7 +11271,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11273,7 +11306,7 @@ "type": 
"string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11307,7 +11340,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11328,7 +11361,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11369,7 +11402,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11423,7 +11456,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11466,7 +11499,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11510,7 +11543,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11554,7 +11587,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11598,7 +11631,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11642,7 +11675,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11683,7 +11716,7 @@ "type": "string", "description": "Project ID for this 
request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11727,7 +11760,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11772,7 +11805,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -11801,7 +11834,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11822,7 +11855,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -11855,7 +11888,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11888,7 +11921,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. 
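[Editor's note] The recurring "pattern" change above relaxes the project-identifier regex: the name alternative now starts with [a-z0-9] instead of [a-z], so a project name beginning with a digit validates. A minimal Go sketch of the effect; the ^…$ anchoring and the sample IDs are illustrative additions, not part of this patch.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The relaxed project-ID pattern from the updated discovery document,
	// anchored here for a full-string match (discovery patterns are matched
	// against the whole parameter value).
	pattern := regexp.MustCompile(`^(?:(?:[-a-z0-9]{1,63}\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))$`)

	for _, id := range []string{
		"my-project",       // lowercase name: accepted by the old and new pattern
		"123456789012",     // numeric project number: accepted by both
		"4example-project", // digit-initial name: only the new pattern accepts this
	} {
		fmt.Printf("%-20s %v\n", id, pattern.MatchString(id))
	}
}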
The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -11909,7 +11942,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -11954,7 +11987,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -11987,7 +12020,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12014,7 +12047,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12040,7 +12073,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12061,7 +12094,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12091,7 +12124,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12117,7 +12150,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12145,7 +12178,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12173,7 +12206,7 @@ "type": "string", "description": "Project ID for this 
request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12201,7 +12234,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12243,7 +12276,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12281,7 +12314,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12314,7 +12347,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12335,7 +12368,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12373,7 +12406,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12405,7 +12438,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12426,7 +12459,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12454,7 +12487,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
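[Editor's note] The rewritten filter descriptions drop the Beta-only qualifier but keep the same grammar: a filter is field_name comparison_string literal_string, the comparison is eq or ne, string literals are RE2 patterns that must match the whole field, and several parenthesized expressions are ANDed together, all passed in the filter query parameter. A minimal sketch of building such request URLs with the standard library; the base URL is a placeholder, since the generated client normally assembles it from this discovery document.

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Placeholder endpoint, shown only to illustrate where the filter parameter goes.
	base := "https://www.googleapis.com/compute/beta/projects/my-project/zones/us-central1-f/instances"

	// A single expression: field_name comparison_string literal_string.
	single := "name ne example-instance"

	// Multiple expressions, each in parentheses, are ANDed together.
	multiple := "(scheduling.automaticRestart eq true) (zone eq us-central1-f)"

	for _, filter := range []string{single, multiple} {
		q := url.Values{}
		q.Set("filter", filter)
		fmt.Println(base + "?" + q.Encode())
	}
}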
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12475,7 +12508,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12501,7 +12534,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12542,7 +12575,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12584,7 +12617,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12626,7 +12659,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12660,7 +12693,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12681,7 +12714,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12715,7 +12748,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12749,6 +12782,51 @@ "https://www.googleapis.com/auth/compute" ] }, + "preview": { + "id": "compute.routers.preview", + "path": "{project}/regions/{region}/routers/{router}/preview", + "httpMethod": "POST", + "description": "Preview fields auto-generated during router create and update operations. Calling this method does NOT create or update the router.", + "parameters": { + "project": { + "type": "string", + "description": "Project ID for this request.", + "required": true, + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "location": "path" + }, + "region": { + "type": "string", + "description": "Name of the region for this request.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + }, + "router": { + "type": "string", + "description": "Name of the Router resource to query.", + "required": true, + "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?", + "location": "path" + } + }, + "parameterOrder": [ + "project", + "region", + "router" + ], + "request": { + "$ref": "Router" + }, + "response": { + "$ref": "RoutersPreviewResponse" + }, + "scopes": [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/compute", + "https://www.googleapis.com/auth/compute.readonly" + ] + }, "update": { "id": "compute.routers.update", "path": "{project}/regions/{region}/routers/{router}", @@ -12759,7 +12837,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -12807,7 +12885,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "route": { @@ -12840,7 +12918,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": 
"(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "route": { @@ -12874,7 +12952,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12900,7 +12978,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -12921,7 +12999,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -12951,7 +13029,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "snapshot": { @@ -12984,7 +13062,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "snapshot": { @@ -13016,7 +13094,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13037,7 +13115,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13067,7 +13145,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sslCertificate": { @@ -13100,7 +13178,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "sslCertificate": { @@ -13134,7 +13212,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13160,7 +13238,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13181,7 +13259,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13209,7 +13287,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13230,7 +13308,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13256,7 +13334,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13297,7 +13375,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13339,7 +13417,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13373,7 +13451,7 @@ "parameters": { "filter": 
{ "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13394,7 +13472,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -13432,7 +13510,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -13465,7 +13543,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -13499,7 +13577,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13525,7 +13603,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13546,7 +13624,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13572,7 +13650,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpProxy": { @@ -13612,7 +13690,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -13645,7 +13723,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -13679,7 +13757,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13705,7 +13783,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form 
filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13726,7 +13804,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13752,7 +13830,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -13788,7 +13866,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetHttpsProxy": { @@ -13826,7 +13904,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -13847,7 +13925,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -13873,7 +13951,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetInstance": { @@ -13914,7 +13992,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "targetInstance": { @@ -13956,7 +14034,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -13990,7 +14068,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. 
For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14011,7 +14089,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -14049,7 +14127,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14093,7 +14171,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14135,7 +14213,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. 
Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14156,7 +14234,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14182,7 +14260,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14223,7 +14301,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14265,7 +14343,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14310,7 +14388,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14344,7 +14422,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. 
Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14365,7 +14443,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14399,7 +14477,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14443,7 +14521,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14493,7 +14571,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14539,7 +14617,7 @@ "parameters": { 
"filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14560,7 +14638,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14586,7 +14664,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14627,7 +14705,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14669,7 +14747,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14703,7 +14781,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14724,7 +14802,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -14762,7 +14840,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14795,7 +14873,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14829,7 +14907,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14857,7 +14935,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14891,7 +14969,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -14912,7 +14990,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -14938,7 +15016,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -14974,7 +15052,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -15010,7 +15088,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "urlMap": { @@ -15048,7 +15126,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. 
Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -15069,7 +15147,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, @@ -15095,7 +15173,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -15136,7 +15214,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -15178,7 +15256,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -15212,7 +15290,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. 
The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). 
Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -15233,7 +15311,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "region": { @@ -15278,7 +15356,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -15316,7 +15394,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -15349,7 +15427,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). 
For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -15370,7 +15448,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -15408,7 +15486,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" }, "zone": { @@ -15440,7 +15518,7 @@ "parameters": { "filter": { "type": "string", - "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nCompute Engine Beta API Only: When filtering in the Beta API, you can also filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nThe Beta API also supports filtering on multiple expressions by providing each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", + "description": "Sets a filter expression for filtering listed resources, in the form filter={expression}. Your {expression} must be in the format: field_name comparison_string literal_string.\n\nThe field_name is the name of the field you want to compare. Only atomic field types are supported (string, number, boolean). 
The comparison_string must be either eq (equals) or ne (not equals). The literal_string is the string value to filter to. The literal value must be valid for the type of field you are filtering by (string, number, boolean). For string fields, the literal value is interpreted as a regular expression using RE2 syntax. The literal value must match the entire field.\n\nFor example, to filter for instances that do not have a name of example-instance, you would use filter=name ne example-instance.\n\nYou can filter on nested fields. For example, you could filter on instances that have set the scheduling.automaticRestart field to true. Use filtering on nested fields to take advantage of labels to organize and search for results based on label values.\n\nTo filter on multiple expressions, provide each separate expression within parentheses. For example, (scheduling.automaticRestart eq true) (zone eq us-central1-f). Multiple expressions are treated as AND expressions, meaning that resources must match all expressions to pass the filters.", "location": "query" }, "maxResults": { @@ -15461,7 +15539,7 @@ "type": "string", "description": "Project ID for this request.", "required": true, - "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))", + "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z0-9](?:[-a-z0-9]{0,61}[a-z0-9])?))", "location": "path" } }, diff --git a/vendor/google.golang.org/api/compute/v1/compute-gen.go b/vendor/google.golang.org/api/compute/v1/compute-gen.go index 5a16aa354..f0c7c2192 100644 --- a/vendor/google.golang.org/api/compute/v1/compute-gen.go +++ b/vendor/google.golang.org/api/compute/v1/compute-gen.go @@ -532,7 +532,7 @@ type ZonesService struct { } // AccessConfig: An access configuration attached to an instance's -// network interface. +// network interface. Only one access config per instance is supported. type AccessConfig struct { // Kind: [Output Only] Type of the resource. Always compute#accessConfig // for access configs. @@ -917,7 +917,8 @@ type AttachedDisk struct { // Source: Specifies a valid partial or full URL to an existing // Persistent Disk resource. This field is only applicable for - // persistent disks. + // persistent disks. Note that for InstanceTemplate, it is just disk + // name, not URL for the disk. Source string `json:"source,omitempty"` // Type: Specifies the type of the disk, either SCRATCH or PERSISTENT. @@ -972,7 +973,8 @@ type AttachedDiskInitializeParams struct { // - // https://www.googleapis.com/compute/v1/projects/project/zones/zone/diskTypes/diskType // - projects/project/zones/zone/diskTypes/diskType - // - zones/zone/diskTypes/diskType + // - zones/zone/diskTypes/diskType Note that for InstanceTemplate, this + // is the name of the disk type, not URL. DiskType string `json:"diskType,omitempty"` // SourceImage: The source image used to create this disk. If the source @@ -1452,6 +1454,8 @@ type Backend struct { // global HTTP(S) load balancing, the default is UTILIZATION. Valid // values are UTILIZATION and RATE. // + // This cannot be used for internal load balancing. + // // Possible values: // "RATE" // "UTILIZATION" @@ -1463,6 +1467,8 @@ type Backend struct { // (depending on balancingMode). A setting of 0 means the group is // completely drained, offering 0% of its available CPU or RPS. Valid // range is [0.0,1.0]. + // + // This cannot be used for internal load balancing. 
CapacityScaler float64 `json:"capacityScaler,omitempty"` // Description: An optional description of this resource. Provide this @@ -1478,23 +1484,33 @@ type Backend struct { // // Note that you must specify an Instance Group resource using the // fully-qualified URL, rather than a partial URL. + // + // When the BackendService has load balancing scheme INTERNAL, the + // instance group must be in a zone within the same region as the + // BackendService. Group string `json:"group,omitempty"` // MaxRate: The max requests per second (RPS) of the group. Can be used // with either RATE or UTILIZATION balancing modes, but required if RATE // mode. For RATE mode, either maxRate or maxRatePerInstance must be // set. + // + // This cannot be used for internal load balancing. MaxRate int64 `json:"maxRate,omitempty"` // MaxRatePerInstance: The max requests per second (RPS) that a single // backend instance can handle.This is used to calculate the capacity of // the group. Can be used in either balancing mode. For RATE mode, // either maxRate or maxRatePerInstance must be set. + // + // This cannot be used for internal load balancing. MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"` // MaxUtilization: Used when balancingMode is UTILIZATION. This ratio // defines the CPU utilization target for the group. The default is 0.8. // Valid range is [0.0, 1.0]. + // + // This cannot be used for internal load balancing. MaxUtilization float64 `json:"maxUtilization,omitempty"` // ForceSendFields is a list of field names (e.g. "BalancingMode") to @@ -1515,6 +1531,14 @@ func (s *Backend) MarshalJSON() ([]byte, error) { // BackendService: A BackendService resource. This resource defines a // group of backend virtual machines and their serving capacity. type BackendService struct { + // AffinityCookieTtlSec: Lifetime of cookies in seconds if + // session_affinity is GENERATED_COOKIE. If set to 0, the cookie is + // non-persistent and lasts only until the end of the browser session + // (or equivalent). The maximum allowed value for TTL is one day. + // + // When the load balancing scheme is INTERNAL, this field is not used. + AffinityCookieTtlSec int64 `json:"affinityCookieTtlSec,omitempty"` + // Backends: The list of backends that serve this BackendService. Backends []*Backend `json:"backends,omitempty"` @@ -1527,6 +1551,8 @@ type BackendService struct { Description string `json:"description,omitempty"` // EnableCDN: If true, enable Cloud CDN for this BackendService. + // + // When the load balancing scheme is INTERNAL, this field is not used. EnableCDN bool `json:"enableCDN,omitempty"` // Fingerprint: Fingerprint of this resource. A hash of the contents @@ -1539,6 +1565,9 @@ type BackendService struct { // HttpsHealthCheck resource for health checking this BackendService. // Currently at most one health check can be specified, and a health // check is required. + // + // For internal load balancing, a URL to a HealthCheck resource must be + // specified instead. HealthChecks []string `json:"healthChecks,omitempty"` // Id: [Output Only] The unique identifier for the resource. This @@ -1560,16 +1589,25 @@ type BackendService struct { // Port: Deprecated in favor of portName. The TCP port to connect on the // backend. The default value is 80. + // + // This cannot be used for internal load balancing. Port int64 `json:"port,omitempty"` // PortName: Name of backend port. The same name should appear in the - // instance groups referenced by this service. Required. 
+ // instance groups referenced by this service. Required when the load + // balancing scheme is EXTERNAL. + // + // When the load balancing scheme is INTERNAL, this field is not used. PortName string `json:"portName,omitempty"` // Protocol: The protocol this BackendService uses to communicate with // backends. // - // Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. + // Possible values are HTTP, HTTPS, HTTP2, TCP and SSL. The default is + // HTTP. + // + // For internal load balancing, the possible values are TCP and UDP, and + // the default is TCP. // // Possible values: // "HTTP" @@ -1584,6 +1622,24 @@ type BackendService struct { // SelfLink: [Output Only] Server-defined URL for the resource. SelfLink string `json:"selfLink,omitempty"` + // SessionAffinity: Type of session affinity to use. The default is + // NONE. + // + // When the load balancing scheme is EXTERNAL, can be NONE, CLIENT_IP, + // or GENERATED_COOKIE. + // + // When the load balancing scheme is INTERNAL, can be NONE, CLIENT_IP, + // CLIENT_IP_PROTO, or CLIENT_IP_PORT_PROTO. + // + // When the protocol is UDP, this field is not used. + // + // Possible values: + // "CLIENT_IP" + // "CLIENT_IP_PROTO" + // "GENERATED_COOKIE" + // "NONE" + SessionAffinity string `json:"sessionAffinity,omitempty"` + // TimeoutSec: How many seconds to wait for the backend before // considering it a failed request. Default is 30 seconds. TimeoutSec int64 `json:"timeoutSec,omitempty"` @@ -1592,12 +1648,13 @@ type BackendService struct { // server. googleapi.ServerResponse `json:"-"` - // ForceSendFields is a list of field names (e.g. "Backends") to - // unconditionally include in API requests. By default, fields with - // empty values are omitted from API requests. However, any non-pointer, - // non-interface field appearing in ForceSendFields will be sent to the - // server regardless of whether the field is empty or not. This may be - // used to include empty fields in Patch requests. + // ForceSendFields is a list of field names (e.g. + // "AffinityCookieTtlSec") to unconditionally include in API requests. + // By default, fields with empty values are omitted from API requests. + // However, any non-pointer, non-interface field appearing in + // ForceSendFields will be sent to the server regardless of whether the + // field is empty or not. This may be used to include empty fields in + // Patch requests. ForceSendFields []string `json:"-"` } @@ -1829,7 +1886,7 @@ type Disk struct { // text format. LastDetachTimestamp string `json:"lastDetachTimestamp,omitempty"` - // Licenses: [Output Only] Any applicable publicly visible licenses. + // Licenses: Any applicable publicly visible licenses. Licenses []string `json:"licenses,omitempty"` // Name: Name of the resource. Provided by the client when the resource @@ -2446,9 +2503,9 @@ func (s *DisksScopedListWarningData) MarshalJSON() ([]byte, error) { // Firewall: Represents a Firewall resource. type Firewall struct { - // Allowed: The list of rules specified by this firewall. Each rule - // specifies a protocol and port-range tuple that describes a permitted - // connection. + // Allowed: The list of ALLOW rules specified by this firewall. Each + // rule specifies a protocol and port-range tuple that describes a + // permitted connection. Allowed []*FirewallAllowed `json:"allowed,omitempty"` // CreationTimestamp: [Output Only] Creation timestamp in RFC3339 text @@ -2492,28 +2549,25 @@ type Firewall struct { // SelfLink: [Output Only] Server-defined URL for the resource. 
SelfLink string `json:"selfLink,omitempty"` - // SourceRanges: The IP address blocks that this rule applies to, - // expressed in CIDR format. One or both of sourceRanges and sourceTags - // may be set. - // - // If both properties are set, an inbound connection is allowed if the - // range matches the sourceRanges OR the tag of the source matches the - // sourceTags property. The connection does not need to match both - // properties. + // SourceRanges: If source ranges are specified, the firewall will apply + // only to traffic that has source IP address in these ranges. These + // ranges must be expressed in CIDR format. One or both of sourceRanges + // and sourceTags may be set. If both properties are set, the firewall + // will apply to traffic that has source IP address within sourceRanges + // OR the source IP that belongs to a tag listed in the sourceTags + // property. The connection does not need to match both properties for + // the firewall to apply. SourceRanges []string `json:"sourceRanges,omitempty"` - // SourceTags: A list of instance tags which this rule applies to. One - // or both of sourceRanges and sourceTags may be set. - // - // If both properties are set, an inbound connection is allowed if the - // range matches the sourceRanges OR the tag of the source matches the - // sourceTags property. The connection does not need to match both - // properties. - // - // Source tags cannot be used to allow access to an instance's external - // IP address. Because tags are associated with an instance, not an IP - // address, source tags can only be used to control traffic traveling - // from an instance inside the same network as the firewall. + // SourceTags: If source tags are specified, the firewall will apply + // only to traffic with source IP that belongs to a tag listed in source + // tags. Source tags cannot be used to control traffic to an instance's + // external IP address. Because tags are associated with an instance, + // not an IP address. One or both of sourceRanges and sourceTags may be + // set. If both properties are set, the firewall will apply to traffic + // that has source IP address within sourceRanges OR the source IP that + // belongs to a tag listed in the sourceTags property. The connection + // does not need to match both properties for the firewall to apply. SourceTags []string `json:"sourceTags,omitempty"` // TargetTags: A list of instance tags indicating sets of instances @@ -2542,16 +2596,16 @@ func (s *Firewall) MarshalJSON() ([]byte, error) { } type FirewallAllowed struct { - // IPProtocol: The IP protocol that is allowed for this rule. The - // protocol type is required when creating a firewall rule. This value - // can either be one of the following well known protocol strings (tcp, - // udp, icmp, esp, ah, sctp), or the IP protocol number. + // IPProtocol: The IP protocol to which this rule applies. The protocol + // type is required when creating a firewall rule. This value can either + // be one of the following well known protocol strings (tcp, udp, icmp, + // esp, ah, sctp), or the IP protocol number. IPProtocol string `json:"IPProtocol,omitempty"` - // Ports: An optional list of ports which are allowed. This field is - // only applicable for UDP or TCP protocol. Each entry must be either an - // integer or a range. If not specified, connections through any port - // are allowed + // Ports: An optional list of ports to which this rule applies. This + // field is only applicable for UDP or TCP protocol. 
Each entry must be + // either an integer or a range. If not specified, this rule applies to + // connections through any port. // // Example inputs include: ["22"], ["80","443"], and ["12345-12349"]. Ports []string `json:"ports,omitempty"` @@ -2618,17 +2672,29 @@ func (s *FirewallList) MarshalJSON() ([]byte, error) { // specifies which pool of target virtual machines to forward a packet // to if it matches the given [IPAddress, IPProtocol, portRange] tuple. type ForwardingRule struct { - // IPAddress: Value of the reserved IP address that this forwarding rule - // is serving on behalf of. For global forwarding rules, the address - // must be a global IP; for regional forwarding rules, the address must - // live in the same region as the forwarding rule. If left empty - // (default value), an ephemeral IP from the same scope (global or - // regional) will be assigned. + // IPAddress: The IP address that this forwarding rule is serving on + // behalf of. + // + // For global forwarding rules, the address must be a global IP; for + // regional forwarding rules, the address must live in the same region + // as the forwarding rule. By default, this field is empty and an + // ephemeral IP from the same scope (global or regional) will be + // assigned. + // + // When the load balancing scheme is INTERNAL, this can only be an RFC + // 1918 IP address belonging to the network/subnetwork configured for + // the forwarding rule. A reserved address cannot be used. If the field + // is empty, the IP address will be automatically allocated from the + // internal IP range of the subnetwork or network configured for this + // forwarding rule. IPAddress string `json:"IPAddress,omitempty"` // IPProtocol: The IP protocol to which this rule applies. Valid options // are TCP, UDP, ESP, AH, SCTP or ICMP. // + // When the load balancing scheme is INTERNAL Date: Fri, 12 Aug 2016 12:36:12 +1000 Subject: [PATCH 0686/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 09e02e2a7..b5f1f2672 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES: * **New Resource:** `aws_load_balancer_backend_server_policy` [GH-7458] * **New Resource:** `aws_load_balancer_listener_policy` [GH-7458] * **New Resource:** `aws_lb_ssl_negotiation_policy` [GH-8084] + * **New Resource:** `google_compute_image` [GH-7960] * **New Data Source:** `aws_ip_ranges` [GH-7984] * **New Data Source:** `fastly_ip_ranges` [GH-7984] From 168d212e779f477f6937e7f24b829cd7bc2bff67 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Fri, 12 Aug 2016 12:14:48 +0900 Subject: [PATCH 0687/1238] Fix. Correct how CORS rules are handled. (#8096) This commit fixes an issue where CORS rules would not be read and thus refreshed correctly should there be a change introduced externally e.g. CORS configuration was edited outside of Terraform. 
Signed-off-by: Krzysztof Wilczynski --- .../providers/aws/resource_aws_s3_bucket.go | 27 +++++++--- .../aws/resource_aws_s3_bucket_test.go | 50 +++++++++++++++++++ 2 files changed, 69 insertions(+), 8 deletions(-) diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go b/builtin/providers/aws/resource_aws_s3_bucket.go index 0978009a9..2a8dfc5cf 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -471,20 +471,32 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{ Bucket: aws.String(d.Id()), }) - log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) if err != nil { + // An S3 Bucket might not have CORS configuration set. + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { + return err + } + log.Printf("[WARN] S3 bucket: %s, no CORS configuration could be found.", d.Id()) + } + log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors) + if cors.CORSRules != nil { rules := make([]map[string]interface{}, 0, len(cors.CORSRules)) for _, ruleObject := range cors.CORSRules { rule := make(map[string]interface{}) - rule["allowed_headers"] = ruleObject.AllowedHeaders - rule["allowed_methods"] = ruleObject.AllowedMethods - rule["allowed_origins"] = ruleObject.AllowedOrigins - rule["expose_headers"] = ruleObject.ExposeHeaders - rule["max_age_seconds"] = ruleObject.MaxAgeSeconds + rule["allowed_headers"] = flattenStringList(ruleObject.AllowedHeaders) + rule["allowed_methods"] = flattenStringList(ruleObject.AllowedMethods) + rule["allowed_origins"] = flattenStringList(ruleObject.AllowedOrigins) + // Both the "ExposeHeaders" and "MaxAgeSeconds" might not be set. 
+ if ruleObject.AllowedOrigins != nil { + rule["expose_headers"] = flattenStringList(ruleObject.ExposeHeaders) + } + if ruleObject.MaxAgeSeconds != nil { + rule["max_age_seconds"] = int(*ruleObject.MaxAgeSeconds) + } rules = append(rules, rule) } if err := d.Set("cors_rule", rules); err != nil { - return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err) + return err } } @@ -567,7 +579,6 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { accelerate, err := s3conn.GetBucketAccelerateConfiguration(&s3.GetBucketAccelerateConfigurationInput{ Bucket: aws.String(d.Id()), }) - log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate) if err != nil { // Amazon S3 Transfer Acceleration might not be supported in the // given region, for example, China (Beijing) and the Government diff --git a/builtin/providers/aws/resource_aws_s3_bucket_test.go b/builtin/providers/aws/resource_aws_s3_bucket_test.go index 25f64479a..5773af4c3 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_test.go @@ -395,11 +395,61 @@ func TestAccAWSS3Bucket_Versioning(t *testing.T) { func TestAccAWSS3Bucket_Cors(t *testing.T) { rInt := acctest.RandInt() + + updateBucketCors := func(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := testAccProvider.Meta().(*AWSClient).s3conn + _, err := conn.PutBucketCors(&s3.PutBucketCorsInput{ + Bucket: aws.String(rs.Primary.ID), + CORSConfiguration: &s3.CORSConfiguration{ + CORSRules: []*s3.CORSRule{ + &s3.CORSRule{ + AllowedHeaders: []*string{aws.String("*")}, + AllowedMethods: []*string{aws.String("GET")}, + AllowedOrigins: []*string{aws.String("https://www.example.com")}, + }, + }, + }, + }) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "NoSuchCORSConfiguration" { + return err + } + } + return nil + } + } + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAWSS3BucketDestroy, Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSS3BucketConfigWithCORS(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"), + testAccCheckAWSS3BucketCors( + "aws_s3_bucket.bucket", + []*s3.CORSRule{ + &s3.CORSRule{ + AllowedHeaders: []*string{aws.String("*")}, + AllowedMethods: []*string{aws.String("PUT"), aws.String("POST")}, + AllowedOrigins: []*string{aws.String("https://www.example.com")}, + ExposeHeaders: []*string{aws.String("x-amz-server-side-encryption"), aws.String("ETag")}, + MaxAgeSeconds: aws.Int64(3000), + }, + }, + ), + updateBucketCors("aws_s3_bucket.bucket"), + ), + ExpectNonEmptyPlan: true, + }, resource.TestStep{ Config: testAccAWSS3BucketConfigWithCORS(rInt), Check: resource.ComposeTestCheckFunc( From 7bc10c62b84f5c3259d994ae3ad5d0d4047e59c3 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Fri, 12 Aug 2016 13:16:59 +1000 Subject: [PATCH 0688/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b5f1f2672..9af43ea8c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -49,6 +49,7 @@ BUG FIXES: * provider/aws: `aws_rds_cluster` creation timeout bumped to 40 minutes [GH-8052] * provider/aws: Fix line ending errors/diffs with IAM Server Certs [GH-8074] * provider/aws: Fixing IAM data 
source policy generation to prevent spurious diffs [GH-6956] + * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` [GH-8096] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From 90efe68ce3a0ccd8b4d50927ec54d822f6d15fa4 Mon Sep 17 00:00:00 2001 From: Gavin Williams Date: Thu, 11 Aug 2016 17:27:54 +0100 Subject: [PATCH 0689/1238] provider/openstack: Add support for 'value_specs' param on 'openstack_networking_network_v2' provider. This can be used to pass additional custom values to the network resource upon creation. --- ...esource_openstack_networking_network_v2.go | 55 ++++++++++++++++++- .../r/networking_network_v2.html.markdown | 2 + 2 files changed, 54 insertions(+), 3 deletions(-) diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2.go b/builtin/providers/openstack/resource_openstack_networking_network_v2.go index 8fbd832c7..aab85ffcf 100644 --- a/builtin/providers/openstack/resource_openstack_networking_network_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_network_v2.go @@ -53,10 +53,50 @@ func resourceNetworkingNetworkV2() *schema.Resource { ForceNew: true, Computed: true, }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, }, } } +// NetworkCreateOpts contains all the values needed to create a new network. +type NetworkCreateOpts struct { + AdminStateUp *bool + Name string + Shared *bool + TenantID string + ValueSpecs map[string]string +} + +// ToNetworkCreateMap casts a NetworkCreateOpts struct to a map. +func (opts NetworkCreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + n := make(map[string]interface{}) + + if opts.AdminStateUp != nil { + n["admin_state_up"] = &opts.AdminStateUp + } + if opts.Name != "" { + n["name"] = opts.Name + } + if opts.Shared != nil { + n["shared"] = &opts.Shared + } + if opts.TenantID != "" { + n["tenant_id"] = opts.TenantID + } + + if opts.ValueSpecs != nil { + for k, v := range opts.ValueSpecs { + n[k] = v + } + } + + return map[string]interface{}{"network": n}, nil +} + func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkingClient, err := config.networkingV2Client(d.Get("region").(string)) @@ -64,9 +104,10 @@ func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) return fmt.Errorf("Error creating OpenStack networking client: %s", err) } - createOpts := networks.CreateOpts{ - Name: d.Get("name").(string), - TenantID: d.Get("tenant_id").(string), + createOpts := NetworkCreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + ValueSpecs: networkValueSpecs(d), } asuRaw := d.Get("admin_state_up").(string) @@ -249,3 +290,11 @@ func waitForNetworkDelete(networkingClient *gophercloud.ServiceClient, networkId return n, "ACTIVE", nil } } + +func networkValueSpecs(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("value_specs").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown index 96c4cb3b3..3daac8103 100644 --- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown +++ 
b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -82,6 +82,8 @@ The following arguments are supported: Acceptable values are "true" and "false". Changing this value updates the state of the existing network. +* `value_specs` - (Optional) Map of additional options. + ## Attributes Reference The following attributes are exported: From 7c4426b674100db60b7d9a91b06df943b611b869 Mon Sep 17 00:00:00 2001 From: Peter McAtominey Date: Fri, 12 Aug 2016 11:44:23 +0100 Subject: [PATCH 0690/1238] provier/azurerm: update Azure SDK to 3.2 Beta Based on SDK: bfc5b4af08f3d3745d908af36b7ed5b9060f0258 | api | version | note | |:----------------------------|:--------------------|:----------| | arm/mediaservices | 2015-10-01 | new | | arm/keyvault | 2015-06-01 | new | | arm/iothub | 2016-02-03 | new | | arm/datalake-store | 2015-12-01 | new | | arm/network | 2016-06-01 | updated | | arm/resources/resources | 2016-07-01 | updated | | arm/resources/policy | 2016-04-01 | updated | | arm/servicebus | 2015-08-01 | updated | - arm: uses go-autorest version v7.1.0. - storage: fix for operating on blobs names containing special characters. - storage: add SetBlobProperties(), update BlobProperties response fields. - storage: make storage client work correctly with read-only secondary account. - storage: add Azure Storage Emulator support. --- .../github.com/Azure/azure-sdk-for-go/LICENSE | 2 +- .../Azure/azure-sdk-for-go/arm/cdn/client.go | 7 +- .../azure-sdk-for-go/arm/cdn/customdomains.go | 8 +- .../azure-sdk-for-go/arm/cdn/endpoints.go | 11 +- .../arm/cdn/nameavailability.go | 8 +- .../azure-sdk-for-go/arm/cdn/operations.go | 8 +- .../Azure/azure-sdk-for-go/arm/cdn/origins.go | 7 +- .../azure-sdk-for-go/arm/cdn/profiles.go | 8 +- .../Azure/azure-sdk-for-go/arm/cdn/version.go | 2 +- .../arm/compute/availabilitysets.go | 8 +- .../azure-sdk-for-go/arm/compute/client.go | 7 +- .../azure-sdk-for-go/arm/compute/models.go | 16 +- .../arm/compute/usageoperations.go | 8 +- .../azure-sdk-for-go/arm/compute/version.go | 2 +- .../compute/virtualmachineextensionimages.go | 8 +- .../arm/compute/virtualmachineextensions.go | 8 +- .../arm/compute/virtualmachineimages.go | 8 +- .../arm/compute/virtualmachines.go | 32 +- .../arm/compute/virtualmachinescalesets.go | 8 +- .../arm/compute/virtualmachinescalesetvms.go | 20 +- .../arm/compute/virtualmachinesizes.go | 8 +- .../arm/network/applicationgateways.go | 12 +- .../azure-sdk-for-go/arm/network/client.go | 11 +- .../expressroutecircuitauthorizations.go | 8 +- .../network/expressroutecircuitpeerings.go | 8 +- .../arm/network/expressroutecircuits.go | 24 +- .../network/expressrouteserviceproviders.go | 10 +- .../arm/network/interfaces.go | 148 +++++++- .../arm/network/loadbalancers.go | 14 +- .../arm/network/localnetworkgateways.go | 12 +- .../azure-sdk-for-go/arm/network/models.go | 278 ++++++++++++-- .../arm/network/publicipaddresses.go | 12 +- .../azure-sdk-for-go/arm/network/routes.go | 9 +- .../arm/network/routetables.go | 8 +- .../arm/network/securitygroups.go | 8 +- .../arm/network/securityrules.go | 10 +- .../azure-sdk-for-go/arm/network/subnets.go | 9 +- .../azure-sdk-for-go/arm/network/usages.go | 7 +- .../azure-sdk-for-go/arm/network/version.go | 4 +- .../virtualnetworkgatewayconnections.go | 8 +- .../arm/network/virtualnetworkgateways.go | 10 +- .../arm/network/virtualnetworkpeerings.go | 342 ++++++++++++++++++ .../arm/network/virtualnetworks.go | 76 +++- .../arm/resources/resources/client.go | 25 +- .../resources/deploymentoperations.go | 
8 +- .../arm/resources/resources/deployments.go | 8 +- .../arm/resources/resources/groups.go | 21 +- .../arm/resources/resources/models.go | 23 +- .../arm/resources/resources/providers.go | 34 +- .../arm/resources/resources/resources.go | 21 +- .../arm/resources/resources/tags.go | 7 +- .../arm/resources/resources/version.go | 4 +- .../azure-sdk-for-go/arm/scheduler/client.go | 7 +- .../arm/scheduler/jobcollections.go | 8 +- .../azure-sdk-for-go/arm/scheduler/jobs.go | 7 +- .../azure-sdk-for-go/arm/scheduler/version.go | 2 +- .../azure-sdk-for-go/arm/storage/accounts.go | 11 +- .../azure-sdk-for-go/arm/storage/client.go | 7 +- .../arm/storage/usageoperations.go | 8 +- .../azure-sdk-for-go/arm/storage/version.go | 2 +- .../arm/trafficmanager/client.go | 7 +- .../arm/trafficmanager/endpoints.go | 8 +- .../arm/trafficmanager/models.go | 4 +- .../arm/trafficmanager/profiles.go | 8 +- .../arm/trafficmanager/version.go | 2 +- .../management/vmutils/vmutils.go | 33 +- .../Azure/azure-sdk-for-go/storage/blob.go | 59 ++- .../Azure/azure-sdk-for-go/storage/client.go | 61 +++- .../Azure/azure-sdk-for-go/storage/file.go | 15 + .../Azure/azure-sdk-for-go/storage/util.go | 14 + vendor/vendor.json | 110 +++--- 71 files changed, 1508 insertions(+), 238 deletions(-) create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go diff --git a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE index d64569567..af39a91e7 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/LICENSE +++ b/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go index 430ed0667..b548b79f1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/client.go @@ -46,9 +46,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go index 981974f16..dc8fd15a6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/customdomains.go @@ -36,7 +36,13 @@ type CustomDomainsClient struct { // NewCustomDomainsClient creates an instance of the CustomDomainsClient // client. 
func NewCustomDomainsClient(subscriptionID string) CustomDomainsClient { - return CustomDomainsClient{New(subscriptionID)} + return NewCustomDomainsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewCustomDomainsClientWithBaseURI creates an instance of the +// CustomDomainsClient client. +func NewCustomDomainsClientWithBaseURI(baseURI string, subscriptionID string) CustomDomainsClient { + return CustomDomainsClient{NewWithBaseURI(baseURI, subscriptionID)} } // Create sends the create request. This method may poll for completion. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go index 42c674ad4..3df91e188 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/endpoints.go @@ -19,10 +19,9 @@ package cdn // regenerated. import ( - "net/http" - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" + "net/http" ) // EndpointsClient is the use these APIs to manage Azure CDN resources through @@ -36,7 +35,13 @@ type EndpointsClient struct { // NewEndpointsClient creates an instance of the EndpointsClient client. func NewEndpointsClient(subscriptionID string) EndpointsClient { - return EndpointsClient{New(subscriptionID)} + return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient +// client. +func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient { + return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)} } // Create sends the create request. This method may poll for completion. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go index ffa1379e9..3b6a69827 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/nameavailability.go @@ -36,7 +36,13 @@ type NameAvailabilityClient struct { // NewNameAvailabilityClient creates an instance of the NameAvailabilityClient // client. func NewNameAvailabilityClient(subscriptionID string) NameAvailabilityClient { - return NameAvailabilityClient{New(subscriptionID)} + return NewNameAvailabilityClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNameAvailabilityClientWithBaseURI creates an instance of the +// NameAvailabilityClient client. +func NewNameAvailabilityClientWithBaseURI(baseURI string, subscriptionID string) NameAvailabilityClient { + return NameAvailabilityClient{NewWithBaseURI(baseURI, subscriptionID)} } // CheckNameAvailability sends the check name availability request. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go index e0ac7919e..961d0aa78 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/operations.go @@ -35,7 +35,13 @@ type OperationsClient struct { // NewOperationsClient creates an instance of the OperationsClient client. func NewOperationsClient(subscriptionID string) OperationsClient { - return OperationsClient{New(subscriptionID)} + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient +// client. 
+func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // List sends the list request. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go index e91badc3c..4e22f1004 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/origins.go @@ -35,7 +35,12 @@ type OriginsClient struct { // NewOriginsClient creates an instance of the OriginsClient client. func NewOriginsClient(subscriptionID string) OriginsClient { - return OriginsClient{New(subscriptionID)} + return NewOriginsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOriginsClientWithBaseURI creates an instance of the OriginsClient client. +func NewOriginsClientWithBaseURI(baseURI string, subscriptionID string) OriginsClient { + return OriginsClient{NewWithBaseURI(baseURI, subscriptionID)} } // Create sends the create request. This method may poll for completion. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go index fd063445f..17ca1a0ae 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/profiles.go @@ -35,7 +35,13 @@ type ProfilesClient struct { // NewProfilesClient creates an instance of the ProfilesClient client. func NewProfilesClient(subscriptionID string) ProfilesClient { - return ProfilesClient{New(subscriptionID)} + return NewProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProfilesClientWithBaseURI creates an instance of the ProfilesClient +// client. +func NewProfilesClientWithBaseURI(baseURI string, subscriptionID string) ProfilesClient { + return ProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Create sends the create request. This method may poll for completion. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go index a03ced7ff..bb48eaaa1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/cdn/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go index d60f15738..013a6af9a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/availabilitysets.go @@ -32,7 +32,13 @@ type AvailabilitySetsClient struct { // NewAvailabilitySetsClient creates an instance of the AvailabilitySetsClient // client. func NewAvailabilitySetsClient(subscriptionID string) AvailabilitySetsClient { - return AvailabilitySetsClient{New(subscriptionID)} + return NewAvailabilitySetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAvailabilitySetsClientWithBaseURI creates an instance of the +// AvailabilitySetsClient client. 
+func NewAvailabilitySetsClientWithBaseURI(baseURI string, subscriptionID string) AvailabilitySetsClient { + return AvailabilitySetsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the operation to create or update the availability set. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go index 64ebfa410..e8f3fb3e6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/client.go @@ -44,9 +44,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go index 3858f6e57..e38a3a122 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/models.go @@ -333,10 +333,10 @@ const ( StandardGS5 VirtualMachineSizeTypes = "Standard_GS5" ) -// AdditionalUnattendContent is gets or sets additional XML formatted -// information that can be included in the Unattend.xml file, which is used -// by Windows Setup. Contents are defined by setting name, component name, -// and the pass in which the content is a applied. +// AdditionalUnattendContent is additional XML formatted information that can +// be included in the Unattend.xml file, which is used by Windows Setup. +// Contents are defined by setting name, component name, and the pass in +// which the content is a applied. type AdditionalUnattendContent struct { PassName PassNames `json:"passName,omitempty"` ComponentName ComponentNames `json:"componentName,omitempty"` @@ -487,7 +487,7 @@ type LinuxConfiguration struct { type ListUsagesResult struct { autorest.Response `json:"-"` Value *[]Usage `json:"value,omitempty"` - NextLink *string `json:",omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // ListUsagesResultPreparer prepares a request to retrieve the next set of results. It returns @@ -910,7 +910,7 @@ type VirtualMachineScaleSetIPConfigurationProperties struct { type VirtualMachineScaleSetListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSet `json:"value,omitempty"` - NextLink *string `json:",omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // VirtualMachineScaleSetListResultPreparer prepares a request to retrieve the next set of results. It returns @@ -930,7 +930,7 @@ func (client VirtualMachineScaleSetListResult) VirtualMachineScaleSetListResultP type VirtualMachineScaleSetListSkusResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetSku `json:"value,omitempty"` - NextLink *string `json:",omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // VirtualMachineScaleSetListSkusResultPreparer prepares a request to retrieve the next set of results. 
It returns @@ -1095,7 +1095,7 @@ type VirtualMachineScaleSetVMInstanceView struct { type VirtualMachineScaleSetVMListResult struct { autorest.Response `json:"-"` Value *[]VirtualMachineScaleSetVM `json:"value,omitempty"` - NextLink *string `json:",omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // VirtualMachineScaleSetVMListResultPreparer prepares a request to retrieve the next set of results. It returns diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go index e54d8251d..922ae1d98 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/usageoperations.go @@ -32,7 +32,13 @@ type UsageOperationsClient struct { // NewUsageOperationsClient creates an instance of the UsageOperationsClient // client. func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { - return UsageOperationsClient{New(subscriptionID)} + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. +func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // List lists compute usages for a subscription. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go index b107a0b64..dffee825d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go index 3438a2555..089ebe10e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensionimages.go @@ -32,7 +32,13 @@ type VirtualMachineExtensionImagesClient struct { // NewVirtualMachineExtensionImagesClient creates an instance of the // VirtualMachineExtensionImagesClient client. func NewVirtualMachineExtensionImagesClient(subscriptionID string) VirtualMachineExtensionImagesClient { - return VirtualMachineExtensionImagesClient{New(subscriptionID)} + return NewVirtualMachineExtensionImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionImagesClientWithBaseURI creates an instance of +// the VirtualMachineExtensionImagesClient client. +func NewVirtualMachineExtensionImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionImagesClient { + return VirtualMachineExtensionImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get gets a virtual machine extension image. 
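Every client in this SDK update gains the same constructor pair: NewXxxClient, which targets DefaultBaseURI, and NewXxxClientWithBaseURI, which takes the management endpoint explicitly. A minimal sketch of calling one of the new constructors against a non-default endpoint (the endpoint and subscription ID below are placeholders, and authorizer setup is omitted):

package main

import (
    "fmt"

    "github.com/Azure/azure-sdk-for-go/arm/compute"
)

func main() {
    // Point the client at a non-default ARM endpoint (for example a
    // sovereign cloud) instead of DefaultBaseURI.
    baseURI := "https://management.chinacloudapi.cn" // illustrative endpoint
    subscriptionID := "00000000-0000-0000-0000-000000000000"

    client := compute.NewVirtualMachineImagesClientWithBaseURI(baseURI, subscriptionID)

    // client.Authorizer still has to be set before making requests; omitted here.
    fmt.Println(client.BaseURI)
}
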
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go index a2d43b3c5..826687b04 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineextensions.go @@ -32,7 +32,13 @@ type VirtualMachineExtensionsClient struct { // NewVirtualMachineExtensionsClient creates an instance of the // VirtualMachineExtensionsClient client. func NewVirtualMachineExtensionsClient(subscriptionID string) VirtualMachineExtensionsClient { - return VirtualMachineExtensionsClient{New(subscriptionID)} + return NewVirtualMachineExtensionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineExtensionsClientWithBaseURI creates an instance of the +// VirtualMachineExtensionsClient client. +func NewVirtualMachineExtensionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineExtensionsClient { + return VirtualMachineExtensionsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the operation to create or update the extension. This method diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go index 5dba9ca7d..50d961460 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachineimages.go @@ -32,7 +32,13 @@ type VirtualMachineImagesClient struct { // NewVirtualMachineImagesClient creates an instance of the // VirtualMachineImagesClient client. func NewVirtualMachineImagesClient(subscriptionID string) VirtualMachineImagesClient { - return VirtualMachineImagesClient{New(subscriptionID)} + return NewVirtualMachineImagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineImagesClientWithBaseURI creates an instance of the +// VirtualMachineImagesClient client. +func NewVirtualMachineImagesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineImagesClient { + return VirtualMachineImagesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get gets a virtual machine image. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go index 598eae4f5..a55c845de 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachines.go @@ -32,7 +32,13 @@ type VirtualMachinesClient struct { // NewVirtualMachinesClient creates an instance of the VirtualMachinesClient // client. func NewVirtualMachinesClient(subscriptionID string) VirtualMachinesClient { - return VirtualMachinesClient{New(subscriptionID)} + return NewVirtualMachinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachinesClientWithBaseURI creates an instance of the +// VirtualMachinesClient client. +func NewVirtualMachinesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachinesClient { + return VirtualMachinesClient{NewWithBaseURI(baseURI, subscriptionID)} } // Capture captures the VM by copying virtual hard disks of the VM and outputs @@ -501,6 +507,30 @@ func (client VirtualMachinesClient) ListResponder(resp *http.Response) (result V return } +// ListNextResults retrieves the next set of results, if any. 
+func (client VirtualMachinesClient) ListNextResults(lastResults VirtualMachineListResult) (result VirtualMachineListResult, err error) { + req, err := lastResults.VirtualMachineListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "compute.VirtualMachinesClient", "List", resp, "Failure responding to next results request request") + } + + return +} + // ListAll gets the list of Virtual Machines in the subscription. Use nextLink // property in the response to get the next page of Virtual Machines. Do this // till nextLink is not null to fetch all the Virtual Machines. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go index dd16405fa..649e008ee 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesets.go @@ -32,7 +32,13 @@ type VirtualMachineScaleSetsClient struct { // NewVirtualMachineScaleSetsClient creates an instance of the // VirtualMachineScaleSetsClient client. func NewVirtualMachineScaleSetsClient(subscriptionID string) VirtualMachineScaleSetsClient { - return VirtualMachineScaleSetsClient{New(subscriptionID)} + return NewVirtualMachineScaleSetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineScaleSetsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetsClient client. +func NewVirtualMachineScaleSetsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetsClient { + return VirtualMachineScaleSetsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate allows you to create or update a virtual machine scale set diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go index 436e08044..1db4a1360 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinescalesetvms.go @@ -32,15 +32,21 @@ type VirtualMachineScaleSetVMsClient struct { // NewVirtualMachineScaleSetVMsClient creates an instance of the // VirtualMachineScaleSetVMsClient client. func NewVirtualMachineScaleSetVMsClient(subscriptionID string) VirtualMachineScaleSetVMsClient { - return VirtualMachineScaleSetVMsClient{New(subscriptionID)} + return NewVirtualMachineScaleSetVMsClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// Deallocate allows you to deallocate a virtual machine virtual machine scale -// set.Shuts down the virtual machine and releases the compute resources. You -// are not billed for the compute resources that this virtual machine uses. -// This method may poll for completion. Polling can be canceled by passing -// the cancel channel argument. The channel will be used to cancel polling -// and any outstanding HTTP requests. 
+// NewVirtualMachineScaleSetVMsClientWithBaseURI creates an instance of the +// VirtualMachineScaleSetVMsClient client. +func NewVirtualMachineScaleSetVMsClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineScaleSetVMsClient { + return VirtualMachineScaleSetVMsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Deallocate allows you to deallocate a virtual machine scale set virtual +// machine. Shuts down the virtual machine and releases the compute +// resources. You are not billed for the compute resources that this virtual +// machine uses. This method may poll for completion. Polling can be canceled +// by passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. // // resourceGroupName is the name of the resource group. vmScaleSetName is the // name of the virtual machine scale set. instanceID is the instance id of diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go index 39ee07b0b..3cdeecedb 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/compute/virtualmachinesizes.go @@ -32,7 +32,13 @@ type VirtualMachineSizesClient struct { // NewVirtualMachineSizesClient creates an instance of the // VirtualMachineSizesClient client. func NewVirtualMachineSizesClient(subscriptionID string) VirtualMachineSizesClient { - return VirtualMachineSizesClient{New(subscriptionID)} + return NewVirtualMachineSizesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualMachineSizesClientWithBaseURI creates an instance of the +// VirtualMachineSizesClient client. +func NewVirtualMachineSizesClientWithBaseURI(baseURI string, subscriptionID string) VirtualMachineSizesClient { + return VirtualMachineSizesClient{NewWithBaseURI(baseURI, subscriptionID)} } // List lists all available virtual machine sizes for a subscription in a diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go index b1a128d7f..3c481c5df 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/applicationgateways.go @@ -36,7 +36,13 @@ type ApplicationGatewaysClient struct { // NewApplicationGatewaysClient creates an instance of the // ApplicationGatewaysClient client. func NewApplicationGatewaysClient(subscriptionID string) ApplicationGatewaysClient { - return ApplicationGatewaysClient{New(subscriptionID)} + return NewApplicationGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewApplicationGatewaysClientWithBaseURI creates an instance of the +// ApplicationGatewaysClient client. +func NewApplicationGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ApplicationGatewaysClient { + return ApplicationGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put ApplicationGateway operation creates/updates a @@ -240,7 +246,7 @@ func (client ApplicationGatewaysClient) GetResponder(resp *http.Response) (resul return } -// List the List ApplicationGateway opertion retrieves all the +// List the List ApplicationGateway operation retrieves all the // applicationgateways in a resource group. // // resourceGroupName is the name of the resource group. 
@@ -326,7 +332,7 @@ func (client ApplicationGatewaysClient) ListNextResults(lastResults ApplicationG return } -// ListAll the List applicationgateway opertion retrieves all the +// ListAll the List applicationgateway operation retrieves all the // applicationgateways in a subscription. func (client ApplicationGatewaysClient) ListAll() (result ApplicationGatewayListResult, err error) { req, err := client.ListAllPreparer() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go index 04fdf7285..6a5e007a2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/client.go @@ -1,5 +1,5 @@ // Package network implements the Azure ARM Network service API version -// 2016-03-30. +// 2016-06-01. // // The Microsoft Azure Network management API provides a RESTful set of web // services that interact with Microsoft Azure Networks service to manage @@ -33,7 +33,7 @@ import ( const ( // APIVersion is the version of the Network - APIVersion = "2016-03-30" + APIVersion = "2016-06-01" // DefaultBaseURI is the default URI used for the service Network DefaultBaseURI = "https://management.azure.com" @@ -49,9 +49,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go index f097126a8..df5e8792d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitauthorizations.go @@ -36,7 +36,13 @@ type ExpressRouteCircuitAuthorizationsClient struct { // NewExpressRouteCircuitAuthorizationsClient creates an instance of the // ExpressRouteCircuitAuthorizationsClient client. func NewExpressRouteCircuitAuthorizationsClient(subscriptionID string) ExpressRouteCircuitAuthorizationsClient { - return ExpressRouteCircuitAuthorizationsClient{New(subscriptionID)} + return NewExpressRouteCircuitAuthorizationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitAuthorizationsClientWithBaseURI creates an instance +// of the ExpressRouteCircuitAuthorizationsClient client. 
+func NewExpressRouteCircuitAuthorizationsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitAuthorizationsClient { + return ExpressRouteCircuitAuthorizationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put Authorization operation creates/updates an diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go index b78544fa4..28c093506 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuitpeerings.go @@ -36,7 +36,13 @@ type ExpressRouteCircuitPeeringsClient struct { // NewExpressRouteCircuitPeeringsClient creates an instance of the // ExpressRouteCircuitPeeringsClient client. func NewExpressRouteCircuitPeeringsClient(subscriptionID string) ExpressRouteCircuitPeeringsClient { - return ExpressRouteCircuitPeeringsClient{New(subscriptionID)} + return NewExpressRouteCircuitPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitPeeringsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitPeeringsClient client. +func NewExpressRouteCircuitPeeringsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitPeeringsClient { + return ExpressRouteCircuitPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put Pering operation creates/updates an peering in the diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go index fa7bbec25..979bee99e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressroutecircuits.go @@ -36,7 +36,13 @@ type ExpressRouteCircuitsClient struct { // NewExpressRouteCircuitsClient creates an instance of the // ExpressRouteCircuitsClient client. func NewExpressRouteCircuitsClient(subscriptionID string) ExpressRouteCircuitsClient { - return ExpressRouteCircuitsClient{New(subscriptionID)} + return NewExpressRouteCircuitsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewExpressRouteCircuitsClientWithBaseURI creates an instance of the +// ExpressRouteCircuitsClient client. +func NewExpressRouteCircuitsClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteCircuitsClient { + return ExpressRouteCircuitsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put ExpressRouteCircuit operation creates/updates a @@ -240,7 +246,7 @@ func (client ExpressRouteCircuitsClient) GetResponder(resp *http.Response) (resu return } -// GetPeeringStats the Liststats ExpressRouteCircuit opertion retrieves all +// GetPeeringStats the Liststats ExpressRouteCircuit operation retrieves all // the stats from a ExpressRouteCircuits in a resource group. // // resourceGroupName is the name of the resource group. circuitName is the @@ -305,8 +311,8 @@ func (client ExpressRouteCircuitsClient) GetPeeringStatsResponder(resp *http.Res return } -// GetStats the Liststats ExpressRouteCircuit opertion retrieves all the stats -// from a ExpressRouteCircuits in a resource group. +// GetStats the Liststats ExpressRouteCircuit operation retrieves all the +// stats from a ExpressRouteCircuits in a resource group. // // resourceGroupName is the name of the resource group. 
circuitName is the // name of the circuit. @@ -369,7 +375,7 @@ func (client ExpressRouteCircuitsClient) GetStatsResponder(resp *http.Response) return } -// List the List ExpressRouteCircuit opertion retrieves all the +// List the List ExpressRouteCircuit operation retrieves all the // ExpressRouteCircuits in a resource group. // // resourceGroupName is the name of the resource group. @@ -455,7 +461,7 @@ func (client ExpressRouteCircuitsClient) ListNextResults(lastResults ExpressRout return } -// ListAll the List ExpressRouteCircuit opertion retrieves all the +// ListAll the List ExpressRouteCircuit operation retrieves all the // ExpressRouteCircuits in a subscription. func (client ExpressRouteCircuitsClient) ListAll() (result ExpressRouteCircuitListResult, err error) { req, err := client.ListAllPreparer() @@ -538,7 +544,7 @@ func (client ExpressRouteCircuitsClient) ListAllNextResults(lastResults ExpressR return } -// ListArpTable the ListArpTable from ExpressRouteCircuit opertion retrieves +// ListArpTable the ListArpTable from ExpressRouteCircuit operation retrieves // the currently advertised arp table associated with the // ExpressRouteCircuits in a resource group. This method may poll for // completion. Polling can be canceled by passing the cancel channel @@ -610,7 +616,7 @@ func (client ExpressRouteCircuitsClient) ListArpTableResponder(resp *http.Respon return } -// ListRoutesTable the ListRoutesTable from ExpressRouteCircuit opertion +// ListRoutesTable the ListRoutesTable from ExpressRouteCircuit operation // retrieves the currently advertised routes table associated with the // ExpressRouteCircuits in a resource group. This method may poll for // completion. Polling can be canceled by passing the cancel channel @@ -683,7 +689,7 @@ func (client ExpressRouteCircuitsClient) ListRoutesTableResponder(resp *http.Res } // ListRoutesTableSummary the ListRoutesTable from ExpressRouteCircuit -// opertion retrieves the currently advertised routes table associated with +// operation retrieves the currently advertised routes table associated with // the ExpressRouteCircuits in a resource group. This method may poll for // completion. Polling can be canceled by passing the cancel channel // argument. The channel will be used to cancel polling and any outstanding diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go index 2f1452600..4943835d0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/expressrouteserviceproviders.go @@ -36,10 +36,16 @@ type ExpressRouteServiceProvidersClient struct { // NewExpressRouteServiceProvidersClient creates an instance of the // ExpressRouteServiceProvidersClient client. func NewExpressRouteServiceProvidersClient(subscriptionID string) ExpressRouteServiceProvidersClient { - return ExpressRouteServiceProvidersClient{New(subscriptionID)} + return NewExpressRouteServiceProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) } -// List the List ExpressRouteServiceProvider opertion retrieves all the +// NewExpressRouteServiceProvidersClientWithBaseURI creates an instance of the +// ExpressRouteServiceProvidersClient client. 
+func NewExpressRouteServiceProvidersClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteServiceProvidersClient { + return ExpressRouteServiceProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List the List ExpressRouteServiceProvider operation retrieves all the // available ExpressRouteServiceProviders. func (client ExpressRouteServiceProvidersClient) List() (result ExpressRouteServiceProviderListResult, err error) { req, err := client.ListPreparer() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go index 59f3b6c34..d198f37f4 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/interfaces.go @@ -35,7 +35,13 @@ type InterfacesClient struct { // NewInterfacesClient creates an instance of the InterfacesClient client. func NewInterfacesClient(subscriptionID string) InterfacesClient { - return InterfacesClient{New(subscriptionID)} + return NewInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewInterfacesClientWithBaseURI creates an instance of the InterfacesClient +// client. +func NewInterfacesClientWithBaseURI(baseURI string, subscriptionID string) InterfacesClient { + return InterfacesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put NetworkInterface operation creates/updates a @@ -243,6 +249,74 @@ func (client InterfacesClient) GetResponder(resp *http.Response) (result Interfa return } +// GetEffectiveRouteTable the get effective routetable operation retrieves all +// the route tables applied on a networkInterface. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. +func (client InterfacesClient) GetEffectiveRouteTable(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.GetEffectiveRouteTablePreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", nil, "Failure preparing request") + } + + resp, err := client.GetEffectiveRouteTableSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure sending request") + } + + result, err = client.GetEffectiveRouteTableResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "GetEffectiveRouteTable", resp, "Failure responding to request") + } + + return +} + +// GetEffectiveRouteTablePreparer prepares the GetEffectiveRouteTable request. 
+func (client InterfacesClient) GetEffectiveRouteTablePreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveRouteTable", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// GetEffectiveRouteTableSender sends the GetEffectiveRouteTable request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) GetEffectiveRouteTableSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// GetEffectiveRouteTableResponder handles the response to the GetEffectiveRouteTable request. The method always +// closes the http.Response Body. +func (client InterfacesClient) GetEffectiveRouteTableResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // GetVirtualMachineScaleSetNetworkInterface the Get ntework interface // operation retreives information about the specified network interface in a // virtual machine scale set. @@ -315,7 +389,7 @@ func (client InterfacesClient) GetVirtualMachineScaleSetNetworkInterfaceResponde return } -// List the List networkInterfaces opertion retrieves all the +// List the List networkInterfaces operation retrieves all the // networkInterfaces in a resource group. // // resourceGroupName is the name of the resource group. @@ -401,7 +475,7 @@ func (client InterfacesClient) ListNextResults(lastResults InterfaceListResult) return } -// ListAll the List networkInterfaces opertion retrieves all the +// ListAll the List networkInterfaces operation retrieves all the // networkInterfaces in a subscription. func (client InterfacesClient) ListAll() (result InterfaceListResult, err error) { req, err := client.ListAllPreparer() @@ -484,6 +558,74 @@ func (client InterfacesClient) ListAllNextResults(lastResults InterfaceListResul return } +// ListEffectiveNetworkSecurityGroups the list effective network security +// group operation retrieves all the network security groups applied on a +// networkInterface. This method may poll for completion. Polling can be +// canceled by passing the cancel channel argument. The channel will be used +// to cancel polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. networkInterfaceName +// is the name of the network interface. 
+func (client InterfacesClient) ListEffectiveNetworkSecurityGroups(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName, networkInterfaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", nil, "Failure preparing request") + } + + resp, err := client.ListEffectiveNetworkSecurityGroupsSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure sending request") + } + + result, err = client.ListEffectiveNetworkSecurityGroupsResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.InterfacesClient", "ListEffectiveNetworkSecurityGroups", resp, "Failure responding to request") + } + + return +} + +// ListEffectiveNetworkSecurityGroupsPreparer prepares the ListEffectiveNetworkSecurityGroups request. +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsPreparer(resourceGroupName string, networkInterfaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "networkInterfaceName": autorest.Encode("path", networkInterfaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}/effectiveNetworkSecurityGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// ListEffectiveNetworkSecurityGroupsSender sends the ListEffectiveNetworkSecurityGroups request. The method will close the +// http.Response Body if it receives an error. +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// ListEffectiveNetworkSecurityGroupsResponder handles the response to the ListEffectiveNetworkSecurityGroups request. The method always +// closes the http.Response Body. +func (client InterfacesClient) ListEffectiveNetworkSecurityGroupsResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + // ListVirtualMachineScaleSetNetworkInterfaces the list network interface // operation retrieves information about all network interfaces in a virtual // machine scale set. 
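[editor's note] The interfaces.go hunks above do two things: they add a NewInterfacesClientWithBaseURI constructor, so a client can target an ARM endpoint other than DefaultBaseURI, and they introduce the long-running GetEffectiveRouteTable and ListEffectiveNetworkSecurityGroups operations, which POST to the NIC's effectiveRouteTable and effectiveNetworkSecurityGroups endpoints and poll for completion. Below is a minimal sketch of calling the new surface; the subscription ID, resource group, and NIC name are placeholders, and authorizer/credential setup is omitted, so treat it as illustrative rather than a ready-to-run integration.

    package main

    import (
        "log"

        "github.com/Azure/azure-sdk-for-go/arm/network"
    )

    func main() {
        // Placeholder subscription ID; NewInterfacesClientWithBaseURI could be
        // used instead to point the client at a non-default ARM endpoint.
        nicClient := network.NewInterfacesClient("00000000-0000-0000-0000-000000000000")
        // nicClient.Authorizer = ...   // credential setup omitted in this sketch

        // GetEffectiveRouteTable is a long-running operation; a nil cancel
        // channel lets polling run until the service reports completion.
        res, err := nicClient.GetEffectiveRouteTable("example-rg", "example-nic", nil)
        if err != nil {
            log.Fatalf("GetEffectiveRouteTable failed: %v", err)
        }
        log.Printf("effective route table call finished with status %s", res.Status)

        // Listing effective network security groups follows the same pattern.
        if _, err := nicClient.ListEffectiveNetworkSecurityGroups("example-rg", "example-nic", nil); err != nil {
            log.Fatalf("ListEffectiveNetworkSecurityGroups failed: %v", err)
        }
    }
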
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go index 92fc83b88..80b99183c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/loadbalancers.go @@ -36,7 +36,13 @@ type LoadBalancersClient struct { // NewLoadBalancersClient creates an instance of the LoadBalancersClient // client. func NewLoadBalancersClient(subscriptionID string) LoadBalancersClient { - return LoadBalancersClient{New(subscriptionID)} + return NewLoadBalancersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLoadBalancersClientWithBaseURI creates an instance of the +// LoadBalancersClient client. +func NewLoadBalancersClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancersClient { + return LoadBalancersClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put LoadBalancer operation creates/updates a @@ -243,7 +249,7 @@ func (client LoadBalancersClient) GetResponder(resp *http.Response) (result Load return } -// List the List loadBalancer opertion retrieves all the loadbalancers in a +// List the List loadBalancer operation retrieves all the loadbalancers in a // resource group. // // resourceGroupName is the name of the resource group. @@ -329,8 +335,8 @@ func (client LoadBalancersClient) ListNextResults(lastResults LoadBalancerListRe return } -// ListAll the List loadBalancer opertion retrieves all the loadbalancers in a -// subscription. +// ListAll the List loadBalancer operation retrieves all the loadbalancers in +// a subscription. func (client LoadBalancersClient) ListAll() (result LoadBalancerListResult, err error) { req, err := client.ListAllPreparer() if err != nil { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go index 498cb416c..679e9b35c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/localnetworkgateways.go @@ -36,7 +36,13 @@ type LocalNetworkGatewaysClient struct { // NewLocalNetworkGatewaysClient creates an instance of the // LocalNetworkGatewaysClient client. func NewLocalNetworkGatewaysClient(subscriptionID string) LocalNetworkGatewaysClient { - return LocalNetworkGatewaysClient{New(subscriptionID)} + return NewLocalNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLocalNetworkGatewaysClientWithBaseURI creates an instance of the +// LocalNetworkGatewaysClient client. +func NewLocalNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) LocalNetworkGatewaysClient { + return LocalNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put LocalNetworkGateway operation creates/updates a @@ -243,8 +249,8 @@ func (client LocalNetworkGatewaysClient) GetResponder(resp *http.Response) (resu return } -// List the List LocalNetworkGateways opertion retrieves all the local network -// gateways stored. +// List the List LocalNetworkGateways operation retrieves all the local +// network gateways stored. // // resourceGroupName is the name of the resource group. 
func (client LocalNetworkGatewaysClient) List(resourceGroupName string) (result LocalNetworkGatewayListResult, err error) { diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go index 79bfafc3a..c09c58112 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/models.go @@ -96,6 +96,22 @@ const ( StandardSmall ApplicationGatewaySkuName = "Standard_Small" ) +// ApplicationGatewaySslProtocol enumerates the values for application gateway +// ssl protocol. +type ApplicationGatewaySslProtocol string + +const ( + // TLSv10 specifies the tl sv 10 state for application gateway ssl + // protocol. + TLSv10 ApplicationGatewaySslProtocol = "TLSv1_0" + // TLSv11 specifies the tl sv 11 state for application gateway ssl + // protocol. + TLSv11 ApplicationGatewaySslProtocol = "TLSv1_1" + // TLSv12 specifies the tl sv 12 state for application gateway ssl + // protocol. + TLSv12 ApplicationGatewaySslProtocol = "TLSv1_2" +) + // ApplicationGatewayTier enumerates the values for application gateway tier. type ApplicationGatewayTier string @@ -114,6 +130,34 @@ const ( InUse AuthorizationUseStatus = "InUse" ) +// EffectiveRouteSource enumerates the values for effective route source. +type EffectiveRouteSource string + +const ( + // EffectiveRouteSourceDefault specifies the effective route source + // default state for effective route source. + EffectiveRouteSourceDefault EffectiveRouteSource = "Default" + // EffectiveRouteSourceUnknown specifies the effective route source + // unknown state for effective route source. + EffectiveRouteSourceUnknown EffectiveRouteSource = "Unknown" + // EffectiveRouteSourceUser specifies the effective route source user + // state for effective route source. + EffectiveRouteSourceUser EffectiveRouteSource = "User" + // EffectiveRouteSourceVirtualNetworkGateway specifies the effective route + // source virtual network gateway state for effective route source. + EffectiveRouteSourceVirtualNetworkGateway EffectiveRouteSource = "VirtualNetworkGateway" +) + +// EffectiveRouteState enumerates the values for effective route state. +type EffectiveRouteState string + +const ( + // Active specifies the active state for effective route state. + Active EffectiveRouteState = "Active" + // Invalid specifies the invalid state for effective route state. + Invalid EffectiveRouteState = "Invalid" +) + // ExpressRouteCircuitPeeringAdvertisedPublicPrefixState enumerates the values // for express route circuit peering advertised public prefix state. type ExpressRouteCircuitPeeringAdvertisedPublicPrefixState string @@ -426,6 +470,22 @@ const ( VirtualNetworkGatewayTypeVpn VirtualNetworkGatewayType = "Vpn" ) +// VirtualNetworkPeeringState enumerates the values for virtual network +// peering state. +type VirtualNetworkPeeringState string + +const ( + // VirtualNetworkPeeringStateConnected specifies the virtual network + // peering state connected state for virtual network peering state. + VirtualNetworkPeeringStateConnected VirtualNetworkPeeringState = "Connected" + // VirtualNetworkPeeringStateDisconnected specifies the virtual network + // peering state disconnected state for virtual network peering state. + VirtualNetworkPeeringStateDisconnected VirtualNetworkPeeringState = "Disconnected" + // VirtualNetworkPeeringStateInitiated specifies the virtual network + // peering state initiated state for virtual network peering state. 
+ VirtualNetworkPeeringStateInitiated VirtualNetworkPeeringState = "Initiated" +) + // VpnType enumerates the values for vpn type. type VpnType string @@ -454,6 +514,22 @@ type ApplicationGateway struct { Etag *string `json:"etag,omitempty"` } +// ApplicationGatewayAuthenticationCertificate is authentication certificates +// of application gateway +type ApplicationGatewayAuthenticationCertificate struct { + ID *string `json:"id,omitempty"` + Properties *ApplicationGatewayAuthenticationCertificatePropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ApplicationGatewayAuthenticationCertificatePropertiesFormat is properties +// of Authentication certificates of application gateway +type ApplicationGatewayAuthenticationCertificatePropertiesFormat struct { + Data *string `json:"data,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // ApplicationGatewayBackendAddress is backend Address of application gateway type ApplicationGatewayBackendAddress struct { Fqdn *string `json:"fqdn,omitempty"` @@ -489,12 +565,13 @@ type ApplicationGatewayBackendHTTPSettings struct { // ApplicationGatewayBackendHTTPSettingsPropertiesFormat is properties of // Backend address pool settings of application gateway type ApplicationGatewayBackendHTTPSettingsPropertiesFormat struct { - Port *int32 `json:"port,omitempty"` - Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` - CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` - RequestTimeout *int32 `json:"requestTimeout,omitempty"` - Probe *SubResource `json:"probe,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Port *int32 `json:"port,omitempty"` + Protocol ApplicationGatewayProtocol `json:"protocol,omitempty"` + CookieBasedAffinity ApplicationGatewayCookieBasedAffinity `json:"cookieBasedAffinity,omitempty"` + RequestTimeout *int32 `json:"requestTimeout,omitempty"` + Probe *SubResource `json:"probe,omitempty"` + AuthenticationCertificates *[]SubResource `json:"authenticationCertificates,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // ApplicationGatewayFrontendIPConfiguration is frontend IP configuration of @@ -566,8 +643,8 @@ type ApplicationGatewayIPConfigurationPropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } -// ApplicationGatewayListResult is response for ListLoadBalancers Api service -// call +// ApplicationGatewayListResult is response for ListApplicationGateways Api +// service call type ApplicationGatewayListResult struct { autorest.Response `json:"-"` Value *[]ApplicationGateway `json:"value,omitempty"` @@ -626,20 +703,22 @@ type ApplicationGatewayProbePropertiesFormat struct { // ApplicationGatewayPropertiesFormat is properties of Application Gateway type ApplicationGatewayPropertiesFormat struct { - Sku *ApplicationGatewaySku `json:"sku,omitempty"` - OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"` - GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIPConfigurations,omitempty"` - SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` - FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` - FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` - Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` - 
BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` - BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` - HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` - URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` - RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` - ResourceGUID *string `json:"resourceGuid,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + Sku *ApplicationGatewaySku `json:"sku,omitempty"` + SslPolicy *ApplicationGatewaySslPolicy `json:"sslPolicy,omitempty"` + OperationalState ApplicationGatewayOperationalState `json:"operationalState,omitempty"` + GatewayIPConfigurations *[]ApplicationGatewayIPConfiguration `json:"gatewayIPConfigurations,omitempty"` + AuthenticationCertificates *[]ApplicationGatewayAuthenticationCertificate `json:"authenticationCertificates,omitempty"` + SslCertificates *[]ApplicationGatewaySslCertificate `json:"sslCertificates,omitempty"` + FrontendIPConfigurations *[]ApplicationGatewayFrontendIPConfiguration `json:"frontendIPConfigurations,omitempty"` + FrontendPorts *[]ApplicationGatewayFrontendPort `json:"frontendPorts,omitempty"` + Probes *[]ApplicationGatewayProbe `json:"probes,omitempty"` + BackendAddressPools *[]ApplicationGatewayBackendAddressPool `json:"backendAddressPools,omitempty"` + BackendHTTPSettingsCollection *[]ApplicationGatewayBackendHTTPSettings `json:"backendHttpSettingsCollection,omitempty"` + HTTPListeners *[]ApplicationGatewayHTTPListener `json:"httpListeners,omitempty"` + URLPathMaps *[]ApplicationGatewayURLPathMap `json:"urlPathMaps,omitempty"` + RequestRoutingRules *[]ApplicationGatewayRequestRoutingRule `json:"requestRoutingRules,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // ApplicationGatewayRequestRoutingRule is request routing rule of application @@ -686,6 +765,11 @@ type ApplicationGatewaySslCertificatePropertiesFormat struct { ProvisioningState *string `json:"provisioningState,omitempty"` } +// ApplicationGatewaySslPolicy is application gateway SSL policy +type ApplicationGatewaySslPolicy struct { + DisabledSslProtocols *[]ApplicationGatewaySslProtocol `json:"disabledSslProtocols,omitempty"` +} + // ApplicationGatewayURLPathMap is urlPathMap of application gateway type ApplicationGatewayURLPathMap struct { ID *string `json:"id,omitempty"` @@ -694,7 +778,7 @@ type ApplicationGatewayURLPathMap struct { Etag *string `json:"etag,omitempty"` } -// ApplicationGatewayURLPathMapPropertiesFormat is properties of probe of +// ApplicationGatewayURLPathMapPropertiesFormat is properties of UrlPathMap of // application gateway type ApplicationGatewayURLPathMapPropertiesFormat struct { DefaultBackendAddressPool *SubResource `json:"defaultBackendAddressPool,omitempty"` @@ -799,6 +883,61 @@ type DNSNameAvailabilityResult struct { Available *bool `json:"available,omitempty"` } +// EffectiveNetworkSecurityGroup is effective NetworkSecurityGroup +type EffectiveNetworkSecurityGroup struct { + NetworkSecurityGroup *SubResource `json:"networkSecurityGroup,omitempty"` + Association *EffectiveNetworkSecurityGroupAssociation `json:"association,omitempty"` + EffectiveSecurityRules *[]EffectiveNetworkSecurityRule `json:"effectiveSecurityRules,omitempty"` +} + +// EffectiveNetworkSecurityGroupAssociation is effective 
NetworkSecurityGroup +// association +type EffectiveNetworkSecurityGroupAssociation struct { + Subnet *SubResource `json:"subnet,omitempty"` + NetworkInterface *SubResource `json:"networkInterface,omitempty"` +} + +// EffectiveNetworkSecurityGroupListResult is response for list effective +// network security groups api servive call +type EffectiveNetworkSecurityGroupListResult struct { + autorest.Response `json:"-"` + Value *[]EffectiveNetworkSecurityGroup `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// EffectiveNetworkSecurityRule is effective NetworkSecurityRules +type EffectiveNetworkSecurityRule struct { + Name *string `json:"name,omitempty"` + Protocol SecurityRuleProtocol `json:"protocol,omitempty"` + SourcePortRange *string `json:"sourcePortRange,omitempty"` + DestinationPortRange *string `json:"destinationPortRange,omitempty"` + SourceAddressPrefix *string `json:"sourceAddressPrefix,omitempty"` + DestinationAddressPrefix *string `json:"destinationAddressPrefix,omitempty"` + ExpandedSourceAddressPrefix *[]string `json:"expandedSourceAddressPrefix,omitempty"` + ExpandedDestinationAddressPrefix *[]string `json:"expandedDestinationAddressPrefix,omitempty"` + Access SecurityRuleAccess `json:"access,omitempty"` + Priority *int32 `json:"priority,omitempty"` + Direction SecurityRuleDirection `json:"direction,omitempty"` +} + +// EffectiveRoute is effective Route +type EffectiveRoute struct { + Name *string `json:"name,omitempty"` + Source EffectiveRouteSource `json:"source,omitempty"` + State EffectiveRouteState `json:"state,omitempty"` + AddressPrefix *[]string `json:"addressPrefix,omitempty"` + NextHopIPAddress *[]string `json:"nextHopIpAddress,omitempty"` + NextHopType RouteNextHopType `json:"nextHopType,omitempty"` +} + +// EffectiveRouteListResult is response for list effective route api servive +// call +type EffectiveRouteListResult struct { + autorest.Response `json:"-"` + Value *[]EffectiveRoute `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + // Error is type Error struct { Code *string `json:"code,omitempty"` @@ -919,6 +1058,8 @@ type ExpressRouteCircuitPeeringPropertiesFormat struct { MicrosoftPeeringConfig *ExpressRouteCircuitPeeringConfig `json:"microsoftPeeringConfig,omitempty"` Stats *ExpressRouteCircuitStats `json:"stats,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` + LastModifiedBy *string `json:"lastModifiedBy,omitempty"` } // ExpressRouteCircuitPropertiesFormat is properties of ExpressRouteCircuit @@ -932,6 +1073,7 @@ type ExpressRouteCircuitPropertiesFormat struct { ServiceProviderNotes *string `json:"serviceProviderNotes,omitempty"` ServiceProviderProperties *ExpressRouteCircuitServiceProviderProperties `json:"serviceProviderProperties,omitempty"` ProvisioningState *string `json:"provisioningState,omitempty"` + GatewayManagerEtag *string `json:"gatewayManagerEtag,omitempty"` } // ExpressRouteCircuitRoutesTable is the routes table associated with the @@ -1119,7 +1261,7 @@ type Interface struct { Etag *string `json:"etag,omitempty"` } -// InterfaceDNSSettings is dns Settings of a network interface +// InterfaceDNSSettings is dns settings of a network interface type InterfaceDNSSettings struct { DNSServers *[]string `json:"dnsServers,omitempty"` AppliedDNSServers *[]string `json:"appliedDnsServers,omitempty"` @@ -1182,6 +1324,14 @@ type InterfacePropertiesFormat struct { ProvisioningState *string 
`json:"provisioningState,omitempty"` } +// IPAddressAvailabilityResult is response for CheckIPAddressAvailability Api +// service call +type IPAddressAvailabilityResult struct { + autorest.Response `json:"-"` + Available *bool `json:"available,omitempty"` + AvailableIPAddresses *[]string `json:"availableIPAddresses,omitempty"` +} + // IPConfiguration is iPConfiguration type IPConfiguration struct { ID *string `json:"id,omitempty"` @@ -1402,6 +1552,21 @@ type Resource struct { Tags *map[string]*string `json:"tags,omitempty"` } +// ResourceNavigationLink is resourceNavigationLink resource +type ResourceNavigationLink struct { + ID *string `json:"id,omitempty"` + Properties *ResourceNavigationLinkFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// ResourceNavigationLinkFormat is properties of ResourceNavigationLink +type ResourceNavigationLinkFormat struct { + LinkedResourceType *string `json:"linkedResourceType,omitempty"` + Link *string `json:"link,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // Route is route resource type Route struct { autorest.Response `json:"-"` @@ -1598,11 +1763,12 @@ func (client SubnetListResult) SubnetListResultPreparer() (*http.Request, error) // SubnetPropertiesFormat is type SubnetPropertiesFormat struct { - AddressPrefix *string `json:"addressPrefix,omitempty"` - NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` - RouteTable *RouteTable `json:"routeTable,omitempty"` - IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + AddressPrefix *string `json:"addressPrefix,omitempty"` + NetworkSecurityGroup *SecurityGroup `json:"networkSecurityGroup,omitempty"` + RouteTable *RouteTable `json:"routeTable,omitempty"` + IPConfigurations *[]IPConfiguration `json:"ipConfigurations,omitempty"` + ResourceNavigationLinks *[]ResourceNavigationLink `json:"resourceNavigationLinks,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // SubResource is @@ -1628,7 +1794,7 @@ type UsageName struct { type UsagesListResult struct { autorest.Response `json:"-"` Value *[]Usage `json:"value,omitempty"` - NextLink *string `json:",omitempty"` + NextLink *string `json:"nextLink,omitempty"` } // UsagesListResultPreparer prepares a request to retrieve the next set of results. 
It returns @@ -1731,7 +1897,6 @@ type VirtualNetworkGatewayIPConfiguration struct { // VirtualNetworkGatewayIPConfigurationPropertiesFormat is properties of // VirtualNetworkGatewayIPConfiguration type VirtualNetworkGatewayIPConfigurationPropertiesFormat struct { - PrivateIPAddress *string `json:"privateIPAddress,omitempty"` PrivateIPAllocationMethod IPAllocationMethod `json:"privateIPAllocationMethod,omitempty"` Subnet *SubResource `json:"subnet,omitempty"` PublicIPAddress *SubResource `json:"publicIPAddress,omitempty"` @@ -1799,13 +1964,54 @@ func (client VirtualNetworkListResult) VirtualNetworkListResultPreparer() (*http autorest.WithBaseURL(to.String(client.NextLink))) } +// VirtualNetworkPeering is peerings in a VirtualNework resource +type VirtualNetworkPeering struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Properties *VirtualNetworkPeeringPropertiesFormat `json:"properties,omitempty"` + Name *string `json:"name,omitempty"` + Etag *string `json:"etag,omitempty"` +} + +// VirtualNetworkPeeringListResult is response for ListSubnets Api service +// callRetrieves all subnet that belongs to a virtual network +type VirtualNetworkPeeringListResult struct { + autorest.Response `json:"-"` + Value *[]VirtualNetworkPeering `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// VirtualNetworkPeeringListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client VirtualNetworkPeeringListResult) VirtualNetworkPeeringListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// VirtualNetworkPeeringPropertiesFormat is +type VirtualNetworkPeeringPropertiesFormat struct { + AllowVirtualNetworkAccess *bool `json:"allowVirtualNetworkAccess,omitempty"` + AllowForwardedTraffic *bool `json:"allowForwardedTraffic,omitempty"` + AllowGatewayTransit *bool `json:"allowGatewayTransit,omitempty"` + UseRemoteGateways *bool `json:"useRemoteGateways,omitempty"` + RemoteVirtualNetwork *SubResource `json:"remoteVirtualNetwork,omitempty"` + PeeringState VirtualNetworkPeeringState `json:"peeringState,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` +} + // VirtualNetworkPropertiesFormat is type VirtualNetworkPropertiesFormat struct { - AddressSpace *AddressSpace `json:"addressSpace,omitempty"` - DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` - Subnets *[]Subnet `json:"subnets,omitempty"` - ResourceGUID *string `json:"resourceGuid,omitempty"` - ProvisioningState *string `json:"provisioningState,omitempty"` + AddressSpace *AddressSpace `json:"addressSpace,omitempty"` + DhcpOptions *DhcpOptions `json:"dhcpOptions,omitempty"` + Subnets *[]Subnet `json:"subnets,omitempty"` + VirtualNetworkPeerings *[]VirtualNetworkPeering `json:"VirtualNetworkPeerings,omitempty"` + ResourceGUID *string `json:"resourceGuid,omitempty"` + ProvisioningState *string `json:"provisioningState,omitempty"` } // VpnClientConfiguration is vpnClientConfiguration for P2S client diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go index 0dda5e7fd..41a8d0e33 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go +++ 
b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/publicipaddresses.go @@ -36,7 +36,13 @@ type PublicIPAddressesClient struct { // NewPublicIPAddressesClient creates an instance of the // PublicIPAddressesClient client. func NewPublicIPAddressesClient(subscriptionID string) PublicIPAddressesClient { - return PublicIPAddressesClient{New(subscriptionID)} + return NewPublicIPAddressesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewPublicIPAddressesClientWithBaseURI creates an instance of the +// PublicIPAddressesClient client. +func NewPublicIPAddressesClientWithBaseURI(baseURI string, subscriptionID string) PublicIPAddressesClient { + return PublicIPAddressesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put PublicIPAddress operation creates/updates a @@ -243,7 +249,7 @@ func (client PublicIPAddressesClient) GetResponder(resp *http.Response) (result return } -// List the List publicIpAddress opertion retrieves all the publicIpAddresses +// List the List publicIpAddress operation retrieves all the publicIpAddresses // in a resource group. // // resourceGroupName is the name of the resource group. @@ -329,7 +335,7 @@ func (client PublicIPAddressesClient) ListNextResults(lastResults PublicIPAddres return } -// ListAll the List publicIpAddress opertion retrieves all the +// ListAll the List publicIpAddress operation retrieves all the // publicIpAddresses in a subscription. func (client PublicIPAddressesClient) ListAll() (result PublicIPAddressListResult, err error) { req, err := client.ListAllPreparer() diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go index 706b8c29a..8fd59b7b2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routes.go @@ -35,7 +35,12 @@ type RoutesClient struct { // NewRoutesClient creates an instance of the RoutesClient client. func NewRoutesClient(subscriptionID string) RoutesClient { - return RoutesClient{New(subscriptionID)} + return NewRoutesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRoutesClientWithBaseURI creates an instance of the RoutesClient client. +func NewRoutesClientWithBaseURI(baseURI string, subscriptionID string) RoutesClient { + return RoutesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put route operation creates/updates a route in the @@ -242,7 +247,7 @@ func (client RoutesClient) GetResponder(resp *http.Response) (result Route, err return } -// List the List network security rule opertion retrieves all the routes in a +// List the List network security rule operation retrieves all the routes in a // route table. // // resourceGroupName is the name of the resource group. routeTableName is the diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go index 6d5d335fe..f3473b908 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/routetables.go @@ -35,7 +35,13 @@ type RouteTablesClient struct { // NewRouteTablesClient creates an instance of the RouteTablesClient client. 
func NewRouteTablesClient(subscriptionID string) RouteTablesClient { - return RouteTablesClient{New(subscriptionID)} + return NewRouteTablesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewRouteTablesClientWithBaseURI creates an instance of the +// RouteTablesClient client. +func NewRouteTablesClientWithBaseURI(baseURI string, subscriptionID string) RouteTablesClient { + return RouteTablesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put RouteTable operation creates/updates a route tablein diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go index 02ee8a177..e6ccc5814 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securitygroups.go @@ -36,7 +36,13 @@ type SecurityGroupsClient struct { // NewSecurityGroupsClient creates an instance of the SecurityGroupsClient // client. func NewSecurityGroupsClient(subscriptionID string) SecurityGroupsClient { - return SecurityGroupsClient{New(subscriptionID)} + return NewSecurityGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityGroupsClientWithBaseURI creates an instance of the +// SecurityGroupsClient client. +func NewSecurityGroupsClientWithBaseURI(baseURI string, subscriptionID string) SecurityGroupsClient { + return SecurityGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put NetworkSecurityGroup operation creates/updates a diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go index e4abcb16c..3a2cff19c 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/securityrules.go @@ -36,7 +36,13 @@ type SecurityRulesClient struct { // NewSecurityRulesClient creates an instance of the SecurityRulesClient // client. func NewSecurityRulesClient(subscriptionID string) SecurityRulesClient { - return SecurityRulesClient{New(subscriptionID)} + return NewSecurityRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSecurityRulesClientWithBaseURI creates an instance of the +// SecurityRulesClient client. +func NewSecurityRulesClientWithBaseURI(baseURI string, subscriptionID string) SecurityRulesClient { + return SecurityRulesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put network security rule operation creates/updates a @@ -248,7 +254,7 @@ func (client SecurityRulesClient) GetResponder(resp *http.Response) (result Secu return } -// List the List network security rule opertion retrieves all the security +// List the List network security rule operation retrieves all the security // rules in a network security group. // // resourceGroupName is the name of the resource group. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go index c5acdf72c..7a28d50fa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/subnets.go @@ -35,7 +35,12 @@ type SubnetsClient struct { // NewSubnetsClient creates an instance of the SubnetsClient client. 
func NewSubnetsClient(subscriptionID string) SubnetsClient { - return SubnetsClient{New(subscriptionID)} + return NewSubnetsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSubnetsClientWithBaseURI creates an instance of the SubnetsClient client. +func NewSubnetsClientWithBaseURI(baseURI string, subscriptionID string) SubnetsClient { + return SubnetsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put Subnet operation creates/updates a subnet in @@ -247,7 +252,7 @@ func (client SubnetsClient) GetResponder(resp *http.Response) (result Subnet, er return } -// List the List subnets opertion retrieves all the subnets in a virtual +// List the List subnets operation retrieves all the subnets in a virtual // network. // // resourceGroupName is the name of the resource group. virtualNetworkName is diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go index 95ad01e48..291172422 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/usages.go @@ -35,7 +35,12 @@ type UsagesClient struct { // NewUsagesClient creates an instance of the UsagesClient client. func NewUsagesClient(subscriptionID string) UsagesClient { - return UsagesClient{New(subscriptionID)} + return NewUsagesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsagesClientWithBaseURI creates an instance of the UsagesClient client. +func NewUsagesClientWithBaseURI(baseURI string, subscriptionID string) UsagesClient { + return UsagesClient{NewWithBaseURI(baseURI, subscriptionID)} } // List lists compute usages for a subscription. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go index 7eb65d692..5a9c3b43f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" @@ -34,7 +34,7 @@ const ( // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-03-30") + return fmt.Sprintf(userAgentFormat, Version(), "network", "2016-06-01") } // Version returns the semantic version (see http://semver.org) of the client. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go index eacc11c8b..9a9ef51bf 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgatewayconnections.go @@ -36,7 +36,13 @@ type VirtualNetworkGatewayConnectionsClient struct { // NewVirtualNetworkGatewayConnectionsClient creates an instance of the // VirtualNetworkGatewayConnectionsClient client. 
func NewVirtualNetworkGatewayConnectionsClient(subscriptionID string) VirtualNetworkGatewayConnectionsClient { - return VirtualNetworkGatewayConnectionsClient{New(subscriptionID)} + return NewVirtualNetworkGatewayConnectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewayConnectionsClientWithBaseURI creates an instance of +// the VirtualNetworkGatewayConnectionsClient client. +func NewVirtualNetworkGatewayConnectionsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewayConnectionsClient { + return VirtualNetworkGatewayConnectionsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put VirtualNetworkGatewayConnection operation diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go index f272ccc72..792b0455d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkgateways.go @@ -36,7 +36,13 @@ type VirtualNetworkGatewaysClient struct { // NewVirtualNetworkGatewaysClient creates an instance of the // VirtualNetworkGatewaysClient client. func NewVirtualNetworkGatewaysClient(subscriptionID string) VirtualNetworkGatewaysClient { - return VirtualNetworkGatewaysClient{New(subscriptionID)} + return NewVirtualNetworkGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkGatewaysClientWithBaseURI creates an instance of the +// VirtualNetworkGatewaysClient client. +func NewVirtualNetworkGatewaysClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkGatewaysClient { + return VirtualNetworkGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate the Put VirtualNetworkGateway operation creates/updates a @@ -312,7 +318,7 @@ func (client VirtualNetworkGatewaysClient) GetResponder(resp *http.Response) (re return } -// List the List VirtualNetworkGateways opertion retrieves all the virtual +// List the List VirtualNetworkGateways operation retrieves all the virtual // network gateways stored. // // resourceGroupName is the name of the resource group. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go new file mode 100644 index 000000000..dd513a4c1 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworkpeerings.go @@ -0,0 +1,342 @@ +package network + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// VirtualNetworkPeeringsClient is the the Microsoft Azure Network management +// API provides a RESTful set of web services that interact with Microsoft +// Azure Networks service to manage your network resrources. The API has +// entities that capture the relationship between an end user and the +// Microsoft Azure Networks service. +type VirtualNetworkPeeringsClient struct { + ManagementClient +} + +// NewVirtualNetworkPeeringsClient creates an instance of the +// VirtualNetworkPeeringsClient client. +func NewVirtualNetworkPeeringsClient(subscriptionID string) VirtualNetworkPeeringsClient { + return NewVirtualNetworkPeeringsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworkPeeringsClientWithBaseURI creates an instance of the +// VirtualNetworkPeeringsClient client. +func NewVirtualNetworkPeeringsClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworkPeeringsClient { + return VirtualNetworkPeeringsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the Put virtual network peering operation creates/updates a +// peering in the specified virtual network This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the peering. virtualNetworkPeeringParameters is parameters supplied to the +// create/update virtual network peering operation +func (client VirtualNetworkPeeringsClient) CreateOrUpdate(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, virtualNetworkPeeringParameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client VirtualNetworkPeeringsClient) CreateOrUpdatePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, virtualNetworkPeeringParameters VirtualNetworkPeering, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithJSON(virtualNetworkPeeringParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByClosing()) + result.Response = resp + return +} + +// Delete the delete virtual network peering operation deletes the specified +// peering. This method may poll for completion. Polling can be canceled by +// passing the cancel channel argument. The channel will be used to cancel +// polling and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the virtual network peering. +func (client VirtualNetworkPeeringsClient) Delete(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client VirtualNetworkPeeringsClient) DeletePreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get the Get virtual network peering operation retreives information about +// the specified virtual network peering. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. virtualNetworkPeeringName is the name of +// the virtual network peering. +func (client VirtualNetworkPeeringsClient) Get(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (result VirtualNetworkPeering, err error) { + req, err := client.GetPreparer(resourceGroupName, virtualNetworkName, virtualNetworkPeeringName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client VirtualNetworkPeeringsClient) GetPreparer(resourceGroupName string, virtualNetworkName string, virtualNetworkPeeringName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + "virtualNetworkPeeringName": autorest.Encode("path", virtualNetworkPeeringName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) GetResponder(resp *http.Response) (result VirtualNetworkPeering, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List the List virtual network peerings operation retrieves all the peerings +// in a virtual network. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. +func (client VirtualNetworkPeeringsClient) List(resourceGroupName string, virtualNetworkName string) (result VirtualNetworkPeeringListResult, err error) { + req, err := client.ListPreparer(resourceGroupName, virtualNetworkName) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing request") + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client VirtualNetworkPeeringsClient) ListPreparer(resourceGroupName string, virtualNetworkName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworkPeeringsClient) ListSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client VirtualNetworkPeeringsClient) ListResponder(resp *http.Response) (result VirtualNetworkPeeringListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListNextResults retrieves the next set of results, if any. +func (client VirtualNetworkPeeringsClient) ListNextResults(lastResults VirtualNetworkPeeringListResult) (result VirtualNetworkPeeringListResult, err error) { + req, err := lastResults.VirtualNetworkPeeringListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure sending next results request request") + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworkPeeringsClient", "List", resp, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go index 9272610dd..a48b83180 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/network/virtualnetworks.go @@ -36,7 +36,81 @@ type VirtualNetworksClient struct { // NewVirtualNetworksClient creates an instance of the VirtualNetworksClient // client. func NewVirtualNetworksClient(subscriptionID string) VirtualNetworksClient { - return VirtualNetworksClient{New(subscriptionID)} + return NewVirtualNetworksClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVirtualNetworksClientWithBaseURI creates an instance of the +// VirtualNetworksClient client. 
+func NewVirtualNetworksClientWithBaseURI(baseURI string, subscriptionID string) VirtualNetworksClient { + return VirtualNetworksClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckIPAddressAvailability checks whether a private Ip address is available +// for use. +// +// resourceGroupName is the name of the resource group. virtualNetworkName is +// the name of the virtual network. ipAddress is the private IP address to be +// verified. +func (client VirtualNetworksClient) CheckIPAddressAvailability(resourceGroupName string, virtualNetworkName string, ipAddress string) (result IPAddressAvailabilityResult, err error) { + req, err := client.CheckIPAddressAvailabilityPreparer(resourceGroupName, virtualNetworkName, ipAddress) + if err != nil { + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", nil, "Failure preparing request") + } + + resp, err := client.CheckIPAddressAvailabilitySender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure sending request") + } + + result, err = client.CheckIPAddressAvailabilityResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "network.VirtualNetworksClient", "CheckIPAddressAvailability", resp, "Failure responding to request") + } + + return +} + +// CheckIPAddressAvailabilityPreparer prepares the CheckIPAddressAvailability request. +func (client VirtualNetworksClient) CheckIPAddressAvailabilityPreparer(resourceGroupName string, virtualNetworkName string, ipAddress string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "virtualNetworkName": autorest.Encode("path", virtualNetworkName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + if len(ipAddress) > 0 { + queryParameters["ipAddress"] = autorest.Encode("query", ipAddress) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CheckIPAddressAvailabilitySender sends the CheckIPAddressAvailability request. The method will close the +// http.Response Body if it receives an error. +func (client VirtualNetworksClient) CheckIPAddressAvailabilitySender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CheckIPAddressAvailabilityResponder handles the response to the CheckIPAddressAvailability request. The method always +// closes the http.Response Body. 
+func (client VirtualNetworksClient) CheckIPAddressAvailabilityResponder(resp *http.Response) (result IPAddressAvailabilityResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return } // CreateOrUpdate the Put VirtualNetwork operation creates/updates a virtual diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go index 04d969646..92a459285 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/client.go @@ -1,5 +1,5 @@ // Package resources implements the Azure ARM Resources service API version -// 2016-02-01. +// 2016-07-01. // package resources @@ -29,7 +29,7 @@ import ( const ( // APIVersion is the version of the Resources - APIVersion = "2016-02-01" + APIVersion = "2016-07-01" // DefaultBaseURI is the default URI used for the service Resources DefaultBaseURI = "https://management.azure.com" @@ -45,9 +45,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } @@ -328,10 +333,11 @@ func (client ManagementClient) GetResponder(resp *http.Response) (result Generic // List get all of the resources under a subscription. // -// filter is the filter to apply on the operation. top is query parameters. If -// null is passed returns all resource groups. -func (client ManagementClient) List(filter string, top *int32) (result ResourceListResult, err error) { - req, err := client.ListPreparer(filter, top) +// filter is the filter to apply on the operation. expand is the $expand query +// parameter. top is query parameters. If null is passed returns all resource +// groups. +func (client ManagementClient) List(filter string, expand string, top *int32) (result ResourceListResult, err error) { + req, err := client.ListPreparer(filter, expand, top) if err != nil { return result, autorest.NewErrorWithError(err, "resources.ManagementClient", "List", nil, "Failure preparing request") } @@ -351,7 +357,7 @@ func (client ManagementClient) List(filter string, top *int32) (result ResourceL } // ListPreparer prepares the List request. 
-func (client ManagementClient) ListPreparer(filter string, top *int32) (*http.Request, error) { +func (client ManagementClient) ListPreparer(filter string, expand string, top *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -362,6 +368,9 @@ func (client ManagementClient) ListPreparer(filter string, top *int32) (*http.Re if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go index e598722c3..8c66f0514 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deploymentoperations.go @@ -33,7 +33,13 @@ type DeploymentOperationsClient struct { // NewDeploymentOperationsClient creates an instance of the // DeploymentOperationsClient client. func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { - return DeploymentOperationsClient{New(subscriptionID)} + return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentOperationsClientWithBaseURI creates an instance of the +// DeploymentOperationsClient client. +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get get a list of deployments operations. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go index 9fa764774..97c4ab347 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/deployments.go @@ -32,7 +32,13 @@ type DeploymentsClient struct { // NewDeploymentsClient creates an instance of the DeploymentsClient client. func NewDeploymentsClient(subscriptionID string) DeploymentsClient { - return DeploymentsClient{New(subscriptionID)} + return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentsClientWithBaseURI creates an instance of the +// DeploymentsClient client. +func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient { + return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)} } // Cancel cancel a currently running template deployment. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go index 5ad1c8ac9..587fb20aa 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/groups.go @@ -31,7 +31,12 @@ type GroupsClient struct { // NewGroupsClient creates an instance of the GroupsClient client. 
func NewGroupsClient(subscriptionID string) GroupsClient { - return GroupsClient{New(subscriptionID)} + return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CheckExistence checks whether resource group exists. @@ -446,10 +451,11 @@ func (client GroupsClient) ListNextResults(lastResults ResourceGroupListResult) // ListResources get all of the resources under a subscription. // // resourceGroupName is query parameters. If null is passed returns all -// resource groups. filter is the filter to apply on the operation. top is -// query parameters. If null is passed returns all resource groups. -func (client GroupsClient) ListResources(resourceGroupName string, filter string, top *int32) (result ResourceListResult, err error) { - req, err := client.ListResourcesPreparer(resourceGroupName, filter, top) +// resource groups. filter is the filter to apply on the operation. expand is +// the $expand query parameter top is query parameters. If null is passed +// returns all resource groups. +func (client GroupsClient) ListResources(resourceGroupName string, filter string, expand string, top *int32) (result ResourceListResult, err error) { + req, err := client.ListResourcesPreparer(resourceGroupName, filter, expand, top) if err != nil { return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "ListResources", nil, "Failure preparing request") } @@ -469,7 +475,7 @@ func (client GroupsClient) ListResources(resourceGroupName string, filter string } // ListResourcesPreparer prepares the ListResources request. -func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, top *int32) (*http.Request, error) { +func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filter string, expand string, top *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), @@ -481,6 +487,9 @@ func (client GroupsClient) ListResourcesPreparer(resourceGroupName string, filte if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go index d89075425..32e5b1b37 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/models.go @@ -44,6 +44,18 @@ const ( SystemAssigned ResourceIdentityType = "SystemAssigned" ) +// AliasPathType is the type of the paths for alias. +type AliasPathType struct { + Path *string `json:"path,omitempty"` + APIVersions *[]string `json:"apiVersions,omitempty"` +} + +// AliasType is the alias type. +type AliasType struct { + Name *string `json:"name,omitempty"` + Paths *[]AliasPathType `json:"paths,omitempty"` +} + // BasicDependency is deployment dependency information. 
type BasicDependency struct { ID *string `json:"id,omitempty"` @@ -69,7 +81,7 @@ type Deployment struct { Properties *DeploymentProperties `json:"properties,omitempty"` } -// DeploymentExportResult is +// DeploymentExportResult is the deployment export result. type DeploymentExportResult struct { autorest.Response `json:"-"` Template *map[string]interface{} `json:"template,omitempty"` @@ -208,7 +220,6 @@ type GenericResourceFilter struct { ResourceType *string `json:"resourceType,omitempty"` Tagname *string `json:"tagname,omitempty"` Tagvalue *string `json:"tagvalue,omitempty"` - Expand *string `json:"expand,omitempty"` } // HTTPMessage is @@ -276,7 +287,9 @@ func (client ProviderListResult) ProviderListResultPreparer() (*http.Request, er type ProviderResourceType struct { ResourceType *string `json:"resourceType,omitempty"` Locations *[]string `json:"locations,omitempty"` + Aliases *[]AliasType `json:"aliases,omitempty"` APIVersions *[]string `json:"apiVersions,omitempty"` + ZoneMappings *[]ZoneMappingType `json:"zoneMappings,omitempty"` Properties *map[string]*string `json:"properties,omitempty"` } @@ -442,3 +455,9 @@ type TemplateLink struct { URI *string `json:"uri,omitempty"` ContentVersion *string `json:"contentVersion,omitempty"` } + +// ZoneMappingType is zone mapping type. +type ZoneMappingType struct { + Location *string `json:"location,omitempty"` + Zones *[]string `json:"zones,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go index 5a19dd70e..b722b838d 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/providers.go @@ -32,14 +32,22 @@ type ProvidersClient struct { // NewProvidersClient creates an instance of the ProvidersClient client. func NewProvidersClient(subscriptionID string) ProvidersClient { - return ProvidersClient{New(subscriptionID)} + return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient +// client. +func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} } // Get gets a resource provider. // -// resourceProviderNamespace is namespace of the resource provider. -func (client ProvidersClient) Get(resourceProviderNamespace string) (result Provider, err error) { - req, err := client.GetPreparer(resourceProviderNamespace) +// resourceProviderNamespace is namespace of the resource provider. expand is +// the $expand query parameter. e.g. To include property aliases in response, +// use $expand=resourceTypes/aliases. +func (client ProvidersClient) Get(resourceProviderNamespace string, expand string) (result Provider, err error) { + req, err := client.GetPreparer(resourceProviderNamespace, expand) if err != nil { return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request") } @@ -59,7 +67,7 @@ func (client ProvidersClient) Get(resourceProviderNamespace string) (result Prov } // GetPreparer prepares the Get request. 
-func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*http.Request, error) { +func (client ProvidersClient) GetPreparer(resourceProviderNamespace string, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), "subscriptionId": autorest.Encode("path", client.SubscriptionID), @@ -68,6 +76,9 @@ func (client ProvidersClient) GetPreparer(resourceProviderNamespace string) (*ht queryParameters := map[string]interface{}{ "api-version": client.APIVersion, } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), @@ -98,9 +109,11 @@ func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider // List gets a list of resource providers. // -// top is query parameters. If null is passed returns all deployments. -func (client ProvidersClient) List(top *int32) (result ProviderListResult, err error) { - req, err := client.ListPreparer(top) +// top is query parameters. If null is passed returns all deployments. expand +// is the $expand query parameter. e.g. To include property aliases in +// response, use $expand=resourceTypes/aliases. +func (client ProvidersClient) List(top *int32, expand string) (result ProviderListResult, err error) { + req, err := client.ListPreparer(top, expand) if err != nil { return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request") } @@ -120,7 +133,7 @@ func (client ProvidersClient) List(top *int32) (result ProviderListResult, err e } // ListPreparer prepares the List request. -func (client ProvidersClient) ListPreparer(top *int32) (*http.Request, error) { +func (client ProvidersClient) ListPreparer(top *int32, expand string) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -131,6 +144,9 @@ func (client ProvidersClient) ListPreparer(top *int32) (*http.Request, error) { if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } preparer := autorest.CreatePreparer( autorest.AsGet(), diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go index fc9b427b3..067f33855 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/resources.go @@ -31,7 +31,12 @@ type Client struct { // NewClient creates an instance of the Client client. func NewClient(subscriptionID string) Client { - return Client{New(subscriptionID)} + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} } // CheckExistence checks whether resource exists. @@ -309,10 +314,11 @@ func (client Client) GetResponder(resp *http.Response) (result GenericResource, // List get all of the resources under a subscription. // -// filter is the filter to apply on the operation. top is query parameters. If -// null is passed returns all resource groups. 
-func (client Client) List(filter string, top *int32) (result ResourceListResult, err error) { - req, err := client.ListPreparer(filter, top) +// filter is the filter to apply on the operation. expand is the $expand query +// parameter. top is query parameters. If null is passed returns all resource +// groups. +func (client Client) List(filter string, expand string, top *int32) (result ResourceListResult, err error) { + req, err := client.ListPreparer(filter, expand, top) if err != nil { return result, autorest.NewErrorWithError(err, "resources.Client", "List", nil, "Failure preparing request") } @@ -332,7 +338,7 @@ func (client Client) List(filter string, top *int32) (result ResourceListResult, } // ListPreparer prepares the List request. -func (client Client) ListPreparer(filter string, top *int32) (*http.Request, error) { +func (client Client) ListPreparer(filter string, expand string, top *int32) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } @@ -343,6 +349,9 @@ func (client Client) ListPreparer(filter string, top *int32) (*http.Request, err if len(filter) > 0 { queryParameters["$filter"] = autorest.Encode("query", filter) } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } if top != nil { queryParameters["$top"] = autorest.Encode("query", *top) } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go index 5f7ad5031..ff4ad4485 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/tags.go @@ -31,7 +31,12 @@ type TagsClient struct { // NewTagsClient creates an instance of the TagsClient client. func NewTagsClient(subscriptionID string) TagsClient { - return TagsClient{New(subscriptionID)} + return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTagsClientWithBaseURI creates an instance of the TagsClient client. +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate create a subscription resource tag. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go index 4ed5907b3..bf439d987 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/resources/resources/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" @@ -34,7 +34,7 @@ const ( // UserAgent returns the UserAgent string to use when sending http.Requests. func UserAgent() string { - return fmt.Sprintf(userAgentFormat, Version(), "resources", "2016-02-01") + return fmt.Sprintf(userAgentFormat, Version(), "resources", "2016-07-01") } // Version returns the semantic version (see http://semver.org) of the client. 
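// Illustrative usage sketch (not part of the vendored patch above): it shows how the new
// `expand` argument threaded through the resources clients might be used, e.g. passing
// "resourceTypes/aliases" so the service returns property aliases with a provider. The
// environment variable name and placeholder credential handling are assumptions for the
// example only; in real code the client's Authorizer would be configured before any call.
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/Azure/azure-sdk-for-go/arm/resources/resources"
)

func main() {
	// NewProvidersClient now routes through NewProvidersClientWithBaseURI, so a
	// non-default ARM endpoint could be supplied here instead.
	client := resources.NewProvidersClient(os.Getenv("ARM_SUBSCRIPTION_ID"))
	// client.Authorizer = ... (credential setup omitted in this sketch)

	// The second argument is the new $expand query parameter.
	provider, err := client.Get("Microsoft.Compute", "resourceTypes/aliases")
	if err != nil {
		log.Fatalf("getting provider: %v", err)
	}
	if provider.Namespace != nil {
		fmt.Println("provider:", *provider.Namespace)
	}
}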
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go index 7d0a70804..110f1ceb1 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/client.go @@ -43,9 +43,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go index 73554f0b2..7cd77ac1e 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobcollections.go @@ -33,7 +33,13 @@ type JobCollectionsClient struct { // NewJobCollectionsClient creates an instance of the JobCollectionsClient // client. func NewJobCollectionsClient(subscriptionID string) JobCollectionsClient { - return JobCollectionsClient{New(subscriptionID)} + return NewJobCollectionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobCollectionsClientWithBaseURI creates an instance of the +// JobCollectionsClient client. +func NewJobCollectionsClientWithBaseURI(baseURI string, subscriptionID string) JobCollectionsClient { + return JobCollectionsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate provisions a new job collection or updates an existing job diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go index e8e395524..3d7220f43 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/jobs.go @@ -31,7 +31,12 @@ type JobsClient struct { // NewJobsClient creates an instance of the JobsClient client. func NewJobsClient(subscriptionID string) JobsClient { - return JobsClient{New(subscriptionID)} + return NewJobsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewJobsClientWithBaseURI creates an instance of the JobsClient client. +func NewJobsClientWithBaseURI(baseURI string, subscriptionID string) JobsClient { + return JobsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate provisions a new job or updates an existing job. 
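// Hypothetical sketch, not part of the patch: the *WithBaseURI constructors added across
// these packages let a caller point the ARM clients at an endpoint other than the
// hard-coded DefaultBaseURI (for example a sovereign cloud or a local test server). The
// endpoint constant and subscription ID below are placeholder assumptions.
package main

import (
	"fmt"

	"github.com/Azure/azure-sdk-for-go/arm/scheduler"
)

func main() {
	const altARMEndpoint = "https://management.chinacloudapi.cn" // assumed alternative endpoint
	const subscriptionID = "00000000-0000-0000-0000-000000000000" // placeholder

	// The existing constructor still targets the public cloud...
	publicClient := scheduler.NewJobCollectionsClient(subscriptionID)

	// ...while the new constructor accepts any base URI.
	altClient := scheduler.NewJobCollectionsClientWithBaseURI(altARMEndpoint, subscriptionID)

	fmt.Println(publicClient.BaseURI, altClient.BaseURI)
}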
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go index 5b3a2b234..140af6e91 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/scheduler/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go index 5cfa2cc32..39e50fe0f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/accounts.go @@ -19,10 +19,9 @@ package storage // regenerated. import ( - "net/http" - "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" + "net/http" ) // AccountsClient is the the Storage Management Client. @@ -32,7 +31,13 @@ type AccountsClient struct { // NewAccountsClient creates an instance of the AccountsClient client. func NewAccountsClient(subscriptionID string) AccountsClient { - return AccountsClient{New(subscriptionID)} + return NewAccountsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAccountsClientWithBaseURI creates an instance of the AccountsClient +// client. +func NewAccountsClientWithBaseURI(baseURI string, subscriptionID string) AccountsClient { + return AccountsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CheckNameAvailability checks that account name is valid and is not in use. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go index 3e69c26e1..68708dbf2 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/client.go @@ -44,9 +44,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go index 85b5c049d..866efc9c3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/usageoperations.go @@ -32,7 +32,13 @@ type UsageOperationsClient struct { // NewUsageOperationsClient creates an instance of the UsageOperationsClient // client. func NewUsageOperationsClient(subscriptionID string) UsageOperationsClient { - return UsageOperationsClient{New(subscriptionID)} + return NewUsageOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewUsageOperationsClientWithBaseURI creates an instance of the +// UsageOperationsClient client. 
+func NewUsageOperationsClientWithBaseURI(baseURI string, subscriptionID string) UsageOperationsClient { + return UsageOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} } // List gets the current usage count and the limit for the resources under the diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go index dbba7ffe4..61b71b170 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/storage/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go index 9a65c768c..0d922d918 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/client.go @@ -43,9 +43,14 @@ type ManagementClient struct { // New creates an instance of the ManagementClient client. func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { return ManagementClient{ Client: autorest.NewClientWithUserAgent(UserAgent()), - BaseURI: DefaultBaseURI, + BaseURI: baseURI, APIVersion: APIVersion, SubscriptionID: subscriptionID, } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go index f8fc7d1b6..91a34c43f 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/endpoints.go @@ -32,7 +32,13 @@ type EndpointsClient struct { // NewEndpointsClient creates an instance of the EndpointsClient client. func NewEndpointsClient(subscriptionID string) EndpointsClient { - return EndpointsClient{New(subscriptionID)} + return NewEndpointsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEndpointsClientWithBaseURI creates an instance of the EndpointsClient +// client. +func NewEndpointsClientWithBaseURI(baseURI string, subscriptionID string) EndpointsClient { + return EndpointsClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate create or update a Traffic Manager endpoint. diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go index 349bee2d5..e6042cbc6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/models.go @@ -36,7 +36,7 @@ type DNSConfig struct { TTL *int64 `json:"ttl,omitempty"` } -// Endpoint is class respresenting a Traffic Manager endpoint. +// Endpoint is class representing a Traffic Manager endpoint. type Endpoint struct { autorest.Response `json:"-"` ID *string `json:"id,omitempty"` @@ -45,7 +45,7 @@ type Endpoint struct { Properties *EndpointProperties `json:"properties,omitempty"` } -// EndpointProperties is class respresenting a Traffic Manager endpoint +// EndpointProperties is class representing a Traffic Manager endpoint // properties. 
type EndpointProperties struct { TargetResourceID *string `json:"targetResourceId,omitempty"` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go index eb25c9df0..95e4199c0 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/profiles.go @@ -32,7 +32,13 @@ type ProfilesClient struct { // NewProfilesClient creates an instance of the ProfilesClient client. func NewProfilesClient(subscriptionID string) ProfilesClient { - return ProfilesClient{New(subscriptionID)} + return NewProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProfilesClientWithBaseURI creates an instance of the ProfilesClient +// client. +func NewProfilesClientWithBaseURI(baseURI string, subscriptionID string) ProfilesClient { + return ProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} } // CheckTrafficManagerRelativeDNSNameAvailability checks the availability of a diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go index 7b1fb23d0..e5954d32a 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/trafficmanager/version.go @@ -24,7 +24,7 @@ import ( const ( major = "3" - minor = "0" + minor = "2" patch = "0" // Always begin a "tag" with a dash (as per http://semver.org) tag = "-beta" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go index 7643699df..1845663c8 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/management/vmutils/vmutils.go @@ -22,7 +22,7 @@ func NewVMConfiguration(name string, roleSize string) vm.Role { } } -// ConfigureForLinux adds configuration for when deploying a generalized Linux +// ConfigureForLinux adds configuration when deploying a generalized Linux // image. If "password" is left empty, SSH password security will be disabled by // default. Certificates with SSH public keys should already be uploaded to the // cloud service where the VM will be deployed and referenced here only by their @@ -57,7 +57,7 @@ func ConfigureForLinux(role *vm.Role, hostname, user, password string, sshPubkey return nil } -// ConfigureForWindows adds configuration for when deploying a generalized +// ConfigureForWindows adds configuration when deploying a generalized // Windows image. timeZone can be left empty. For a complete list of supported // time zone entries, you can either refer to the values listed in the registry // entry "HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time @@ -80,6 +80,35 @@ func ConfigureForWindows(role *vm.Role, hostname, user, password string, enableA return nil } +// ConfigureWithCustomDataForLinux configures custom data for Linux-based images. +// The customData contains either cloud-init or shell script to be executed upon start. +// +// The function expects the customData to be base64-encoded. +func ConfigureWithCustomDataForLinux(role *vm.Role, customData string) error { + return configureWithCustomData(role, customData, vm.ConfigurationSetTypeLinuxProvisioning) +} + +// ConfigureWithCustomDataForWindows configures custom data for Windows-based images. 
+// The customData contains either cloud-init or shell script to be executed upon start. +// +// The function expects the customData to be base64-encoded. +func ConfigureWithCustomDataForWindows(role *vm.Role, customData string) error { + return configureWithCustomData(role, customData, vm.ConfigurationSetTypeWindowsProvisioning) +} + +func configureWithCustomData(role *vm.Role, customData string, typ vm.ConfigurationSetType) error { + if role == nil { + return fmt.Errorf(errParamNotSpecified, "role") + } + + role.ConfigurationSets = updateOrAddConfig(role.ConfigurationSets, typ, + func(config *vm.ConfigurationSet) { + config.CustomData = customData + }) + + return nil +} + // ConfigureWindowsToJoinDomain adds configuration to join a new Windows vm to a // domain. "username" must be in UPN form (user@domain.com), "machineOU" can be // left empty diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go index 8b2b6f69f..4207cfec6 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/blob.go @@ -111,6 +111,8 @@ type BlobProperties struct { ContentLength int64 `xml:"Content-Length"` ContentType string `xml:"Content-Type"` ContentEncoding string `xml:"Content-Encoding"` + CacheControl string `xml:"Cache-Control"` + ContentLanguage string `xml:"Cache-Language"` BlobType BlobType `xml:"x-ms-blob-blob-type"` SequenceNumber int64 `xml:"x-ms-blob-sequence-number"` CopyID string `xml:"CopyId"` @@ -122,6 +124,16 @@ type BlobProperties struct { LeaseStatus string `xml:"LeaseStatus"` } +// BlobHeaders contains various properties of a blob and is an entry +// in SetBlobProperties +type BlobHeaders struct { + ContentMD5 string `header:"x-ms-blob-content-md5"` + ContentLanguage string `header:"x-ms-blob-content-language"` + ContentEncoding string `header:"x-ms-blob-content-encoding"` + ContentType string `header:"x-ms-blob-content-type"` + CacheControl string `header:"x-ms-blob-cache-control"` +} + // BlobListResponse contains the response fields from ListBlobs call. // // See https://msdn.microsoft.com/en-us/library/azure/dd135734.aspx @@ -474,7 +486,6 @@ func (b BlobStorageClient) ListBlobs(container string, params ListBlobsParameter func (b BlobStorageClient) BlobExists(container, name string) (bool, error) { verb := "HEAD" uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), url.Values{}) - headers := b.client.getStandardHeaders() resp, err := b.client.exec(verb, uri, headers, nil) if resp != nil { @@ -590,6 +601,9 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope ContentMD5: resp.headers.Get("Content-MD5"), ContentLength: contentLength, ContentEncoding: resp.headers.Get("Content-Encoding"), + ContentType: resp.headers.Get("Content-Type"), + CacheControl: resp.headers.Get("Cache-Control"), + ContentLanguage: resp.headers.Get("Content-Language"), SequenceNumber: sequenceNum, CopyCompletionTime: resp.headers.Get("x-ms-copy-completion-time"), CopyStatusDescription: resp.headers.Get("x-ms-copy-status-description"), @@ -602,6 +616,34 @@ func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobPrope }, nil } +// SetBlobProperties replaces the BlobHeaders for the specified blob. +// +// Some keys may be converted to Camel-Case before sending. All keys +// are returned in lower case by GetBlobProperties. 
HTTP header names +// are case-insensitive so case munging should not matter to other +// applications either. +// +// See https://msdn.microsoft.com/en-us/library/azure/ee691966.aspx +func (b BlobStorageClient) SetBlobProperties(container, name string, blobHeaders BlobHeaders) error { + params := url.Values{"comp": {"properties"}} + uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params) + headers := b.client.getStandardHeaders() + + extraHeaders := headersFromStruct(blobHeaders) + + for k, v := range extraHeaders { + headers[k] = v + } + + resp, err := b.client.exec("PUT", uri, headers, nil) + if err != nil { + return err + } + defer resp.body.Close() + + return checkRespCode(resp.statusCode, []int{http.StatusOK}) +} + // SetBlobMetadata replaces the metadata for the specified blob. // // Some keys may be converted to Camel-Case before sending. All keys @@ -1033,9 +1075,24 @@ func (b BlobStorageClient) GetBlobSASURI(container, name string, expiry time.Tim blobURL = b.GetBlobURL(container, name) ) canonicalizedResource, err := b.client.buildCanonicalizedResource(blobURL) + if err != nil { return "", err } + + // "The canonicalizedresouce portion of the string is a canonical path to the signed resource. + // It must include the service name (blob, table, queue or file) for version 2015-02-21 or + // later, the storage account name, and the resource name, and must be URL-decoded. + // -- https://msdn.microsoft.com/en-us/library/azure/dn140255.aspx + + // We need to replace + with %2b first to avoid being treated as a space (which is correct for query strings, but not the path component). + canonicalizedResource = strings.Replace(canonicalizedResource, "+", "%2b", -1) + + canonicalizedResource, err = url.QueryUnescape(canonicalizedResource) + if err != nil { + return "", err + } + signedExpiry := expiry.UTC().Format(time.RFC3339) signedResource := "b" diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go index 4cc7e1124..2816e03ec 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/client.go @@ -29,10 +29,20 @@ const ( defaultUseHTTPS = true + // StorageEmulatorAccountName is the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountName = "devstoreaccount1" + + // StorageEmulatorAccountKey is the the fixed storage account used by Azure Storage Emulator + StorageEmulatorAccountKey = "Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==" + blobServiceName = "blob" tableServiceName = "table" queueServiceName = "queue" fileServiceName = "file" + + storageEmulatorBlob = "127.0.0.1:10000" + storageEmulatorTable = "127.0.0.1:10002" + storageEmulatorQueue = "127.0.0.1:10001" ) // Client is the object that needs to be constructed to perform @@ -114,9 +124,18 @@ func (e UnexpectedStatusCodeError) Got() int { // NewBasicClient constructs a Client with given storage service name and // key. 
func NewBasicClient(accountName, accountKey string) (Client, error) { + if accountName == StorageEmulatorAccountName { + return NewEmulatorClient() + } return NewClient(accountName, accountKey, DefaultBaseURL, DefaultAPIVersion, defaultUseHTTPS) } +//NewEmulatorClient contructs a Client intended to only work with Azure +//Storage Emulator +func NewEmulatorClient() (Client, error) { + return NewClient(StorageEmulatorAccountName, StorageEmulatorAccountKey, DefaultBaseURL, DefaultAPIVersion, false) +} + // NewClient constructs a Client. This should be used if the caller wants // to specify whether to use HTTPS, a specific REST API version or a custom // storage endpoint than Azure Public Cloud. @@ -149,8 +168,19 @@ func (c Client) getBaseURL(service string) string { if c.useHTTPS { scheme = "https" } - - host := fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) + host := "" + if c.accountName == StorageEmulatorAccountName { + switch service { + case blobServiceName: + host = storageEmulatorBlob + case tableServiceName: + host = storageEmulatorTable + case queueServiceName: + host = storageEmulatorQueue + } + } else { + host = fmt.Sprintf("%s.%s.%s", c.accountName, service, c.baseURL) + } u := &url.URL{ Scheme: scheme, @@ -165,8 +195,13 @@ func (c Client) getEndpoint(service, path string, params url.Values) string { panic(err) } - if path == "" { - path = "/" // API doesn't accept path segments not starting with '/' + // API doesn't accept path segments not starting with '/' + if !strings.HasPrefix(path, "/") { + path = fmt.Sprintf("/%v", path) + } + + if c.accountName == StorageEmulatorAccountName { + path = fmt.Sprintf("/%v%v", StorageEmulatorAccountName, path) } u.Path = path @@ -200,7 +235,7 @@ func (c Client) GetFileService() FileServiceClient { func (c Client) createAuthorizationHeader(canonicalizedString string) string { signature := c.computeHmac256(canonicalizedString) - return fmt.Sprintf("%s %s:%s", "SharedKey", c.accountName, signature) + return fmt.Sprintf("%s %s:%s", "SharedKey", c.getCanonicalizedAccountName(), signature) } func (c Client) getAuthorizationHeader(verb, url string, headers map[string]string) (string, error) { @@ -220,6 +255,12 @@ func (c Client) getStandardHeaders() map[string]string { } } +func (c Client) getCanonicalizedAccountName() string { + // since we may be trying to access a secondary storage account, we need to + // remove the -secondary part of the storage name + return strings.TrimSuffix(c.accountName, "-secondary") +} + func (c Client) buildCanonicalizedHeader(headers map[string]string) string { cm := make(map[string]string) @@ -261,7 +302,7 @@ func (c Client) buildCanonicalizedResourceTable(uri string) (string, error) { return "", fmt.Errorf(errMsg, err.Error()) } - cr := "/" + c.accountName + cr := "/" + c.getCanonicalizedAccountName() if len(u.Path) > 0 { cr += u.Path @@ -277,10 +318,13 @@ func (c Client) buildCanonicalizedResource(uri string) (string, error) { return "", fmt.Errorf(errMsg, err.Error()) } - cr := "/" + c.accountName + cr := "/" + c.getCanonicalizedAccountName() if len(u.Path) > 0 { - cr += u.Path + // Any portion of the CanonicalizedResource string that is derived from + // the resource's URI should be encoded exactly as it is in the URI. 
+ // -- https://msdn.microsoft.com/en-gb/library/azure/dd179428.aspx + cr += u.EscapedPath() } params, err := url.ParseQuery(u.RawQuery) @@ -343,7 +387,6 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader return nil, err } headers["Authorization"] = authHeader - if err != nil { return nil, err } diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go index ede4e21be..4710fbad3 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/file.go @@ -47,6 +47,9 @@ func (f FileServiceClient) CreateShareIfNotExists(name string) (bool, error) { // CreateShare creates a Azure File Share and returns its response func (f FileServiceClient) createShare(name string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) headers := f.client.getStandardHeaders() return f.client.exec("PUT", uri, headers, nil) @@ -86,6 +89,18 @@ func (f FileServiceClient) DeleteShareIfExists(name string) (bool, error) { // deleteShare makes the call to Delete Share operation endpoint and returns // the response func (f FileServiceClient) deleteShare(name string) (*storageResponse, error) { + if err := f.checkForStorageEmulator(); err != nil { + return nil, err + } uri := f.client.getEndpoint(fileServiceName, pathForFileShare(name), url.Values{"restype": {"share"}}) return f.client.exec("DELETE", uri, f.client.getStandardHeaders(), nil) } + +//checkForStorageEmulator determines if the client is setup for use with +//Azure Storage Emulator, and returns a relevant error +func (f FileServiceClient) checkForStorageEmulator() error { + if f.client.accountName == StorageEmulatorAccountName { + return fmt.Errorf("Error: File service is not currently supported by Azure Storage Emulator") + } + return nil +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go index 33155af7f..d71c6ce55 100644 --- a/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go +++ b/vendor/github.com/Azure/azure-sdk-for-go/storage/util.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net/http" "net/url" + "reflect" "time" ) @@ -69,3 +70,16 @@ func xmlMarshal(v interface{}) (io.Reader, int, error) { } return bytes.NewReader(b), len(b), nil } + +func headersFromStruct(v interface{}) map[string]string { + headers := make(map[string]string) + value := reflect.ValueOf(v) + for i := 0; i < value.NumField(); i++ { + key := value.Type().Field(i).Tag.Get("header") + val := value.Field(i).String() + if val != "" { + headers[key] = val + } + } + return headers +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 890036fe6..93ced2a7d 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -3,164 +3,164 @@ "ignore": "appengine test", "package": [ { - "checksumSHA1": "rJgU6MbpmtRBZH6JNByWvzNNKlM=", + "checksumSHA1": "wdXWOKasg/7Apy5AVOHZHpSAZhc=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/cdn", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "akhrj2PzJv19enRg4Ar6rE6FWLk=", + "checksumSHA1": "88TAbW2kN6NighEaL9X8IKNp3Go=", "comment": 
"v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/compute", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "opJ3PtP7GCxGbMGTdqDIvAL30X0=", + "checksumSHA1": "AdWY+YN439QSBtAFqG/dIV93J38=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/network", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "hBg0HREYbzIAzab2qpevBiYQ8V4=", + "checksumSHA1": "EdND5GRzWDkSwl18UVWJJgsnOG4=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/resources/resources", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "HTn8BviQEiF6Tp98QWlov90du90=", + "checksumSHA1": "iFV4QVdtS6bx3fNhjDAyS/Eiw+Y=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/scheduler", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "XtI6atfC23rgxiObn0Da6fvXL94=", + "checksumSHA1": "jh7wjswBwwVeY/P8wtqtqBR58y4=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/arm/storage", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "2qCsDmxUSe6LzkuC9+rTK9wEPBg=", + "checksumSHA1": "PLyDrzfgTsbkk7HsuJxbj8QmTC4=", "path": "github.com/Azure/azure-sdk-for-go/arm/trafficmanager", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "Q+0Zz0iylSKMck4JhYc8XR83i8M=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/core/http", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "F2fqk+OPM3drSkK0G6So5ASayyA=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/core/tls", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "SGOuRzxuQpJChBvq6SsNojKvcKw=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "TcQ6KXoBkvUhCYeggJ/bwcz+QaQ=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/affinitygroup", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": 
"bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "HfjyhRfmKBsVgWLTOfWVcxe8Z88=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/hostedservice", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "4otMhU6xZ41HfmiGZFYtV93GdcI=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/location", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "hxivwm3D13cqFGOlOS3q8HD7DN0=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/networksecuritygroup", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "2USoeYg8k1tA1QNLRByEnP/asqs=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/osimage", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "hzwziaU5QlMlFcFPdbEmW18oV3Y=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/sql", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "YoAhDE0X6hSFuPpXbpfqcTC0Zvw=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/storageservice", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "6xEiZL4a9rr5YbnY0RdzuzhEF1Q=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/virtualmachine", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "xcBM3zQtfcE3VHNBACJJGEesCBI=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/virtualmachinedisk", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "0bfdkDZ2JFV7bol6GQFfC0g+lP4=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/virtualmachineimage", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "IhjDqm84VDVSIoHyiGvUzuljG3s=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/virtualnetwork", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": 
"2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "KkKaKnxZ+I5qG0V0eg8vjNctI+E=", + "checksumSHA1": "+ykSkHo40/f6VK6/zXDqzF8Lh14=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/management/vmutils", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { - "checksumSHA1": "O6OHu5bxX1FAHpKt0TDSwPLvmzA=", + "checksumSHA1": "w1X4Sxcdx4WhCqVZdPWoUuMPn9U=", "comment": "v2.1.1-beta-8-gca4d906", "path": "github.com/Azure/azure-sdk-for-go/storage", - "revision": "2cdbb8553a20830507e4178b4d0803794136dde7", - "revisionTime": "2016-06-29T16:19:23Z" + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" }, { "checksumSHA1": "pi00alAztMy9MGxJmvg9qC+tsGk=", From cdb80f68a8e20369d57e0f9c61abe1a18681cdec Mon Sep 17 00:00:00 2001 From: James Bardin Date: Wed, 10 Aug 2016 15:47:25 -0400 Subject: [PATCH 0691/1238] Ensure better state normalization Fix checksum issue with remote state If we read a state file with "null" objects in a module and they become initialized to an empty map the state file may be written out with empty objects rather than "null", changing the checksum. If we can detect this, increment the serial number to prevent a conflict in atlas. Our fakeAtlas test server now needs to decode the state directly rather than using the ReadState function, so as to be able to read the state unaltered. The terraform.State data structures have initialization spread out throughout the package. More thoroughly initialize State during ReadState, and add a call to init() during WriteState as another normalization safeguard. Expose State.init through an exported Init() method, so that a new State can be completely realized outside of the terraform package. Additionally, the internal init now completely walks all internal state structures ensuring that all maps and slices are initialized. While it was mentioned before that the `init()` methods are problematic with too many call sites, expanding this out better exposes the entry points that will need to be refactored later for improved concurrency handling. The State structures had a mix of `omitempty` fields. Remove omitempty for all maps and slices as part of this normalization process. Make Lineage mandatory, which is now explicitly set in some tests. 
--- command/apply_test.go | 1 + command/command_test.go | 4 +- command/refresh_test.go | 25 +++++-- state/remote/atlas_test.go | 22 ++++-- state/testing.go | 11 ++- terraform/state.go | 104 +++++++++++++++++++++++++--- terraform/state_test.go | 20 +++++- terraform/state_upgrade_v1_to_v2.go | 1 + terraform/upgrade_state_v1_test.go | 3 +- 9 files changed, 162 insertions(+), 29 deletions(-) diff --git a/command/apply_test.go b/command/apply_test.go index ca0816ba7..1b6e87069 100644 --- a/command/apply_test.go +++ b/command/apply_test.go @@ -1181,6 +1181,7 @@ func TestApply_backup(t *testing.T) { }, }, } + originalState.Init() statePath := testStateFile(t, originalState) backupPath := testTempFile(t) diff --git a/command/command_test.go b/command/command_test.go index 174d439ac..01863ee39 100644 --- a/command/command_test.go +++ b/command/command_test.go @@ -131,7 +131,7 @@ func testReadPlan(t *testing.T, path string) *terraform.Plan { // testState returns a test State structure that we use for a lot of tests. func testState() *terraform.State { - return &terraform.State{ + state := &terraform.State{ Version: 2, Modules: []*terraform.ModuleState{ &terraform.ModuleState{ @@ -148,6 +148,8 @@ func testState() *terraform.State { }, }, } + state.Init() + return state } func testStateFile(t *testing.T, s *terraform.State) string { diff --git a/command/refresh_test.go b/command/refresh_test.go index 91ef22b17..c7d0ea6eb 100644 --- a/command/refresh_test.go +++ b/command/refresh_test.go @@ -173,7 +173,7 @@ func TestRefresh_defaultState(t *testing.T) { } p.RefreshFn = nil - p.RefreshReturn = &terraform.InstanceState{ID: "yes"} + p.RefreshReturn = newInstanceState("yes") args := []string{ testFixturePath("refresh"), @@ -200,7 +200,8 @@ func TestRefresh_defaultState(t *testing.T) { actual := newState.RootModule().Resources["test_instance.foo"].Primary expected := p.RefreshReturn if !reflect.DeepEqual(actual, expected) { - t.Fatalf("bad: %#v", actual) + t.Logf("expected:\n%#v", expected) + t.Fatalf("bad:\n%#v", actual) } f, err = os.Open(statePath + DefaultBackupExtension) @@ -347,7 +348,7 @@ func TestRefresh_outPath(t *testing.T) { } p.RefreshFn = nil - p.RefreshReturn = &terraform.InstanceState{ID: "yes"} + p.RefreshReturn = newInstanceState("yes") args := []string{ "-state", statePath, @@ -577,7 +578,7 @@ func TestRefresh_backup(t *testing.T) { } p.RefreshFn = nil - p.RefreshReturn = &terraform.InstanceState{ID: "yes"} + p.RefreshReturn = newInstanceState("yes") args := []string{ "-state", statePath, @@ -662,7 +663,7 @@ func TestRefresh_disableBackup(t *testing.T) { } p.RefreshFn = nil - p.RefreshReturn = &terraform.InstanceState{ID: "yes"} + p.RefreshReturn = newInstanceState("yes") args := []string{ "-state", statePath, @@ -742,6 +743,20 @@ func TestRefresh_displaysOutputs(t *testing.T) { } } +// When creating an InstaneState for direct comparison to one contained in +// terraform.State, all fields must be inintialized (duplicating the +// InstanceState.init() method) +func newInstanceState(id string) *terraform.InstanceState { + return &terraform.InstanceState{ + ID: id, + Attributes: make(map[string]string), + Ephemeral: terraform.EphemeralState{ + ConnInfo: make(map[string]string), + }, + Meta: make(map[string]string), + } +} + const refreshVarFile = ` foo = "bar" ` diff --git a/state/remote/atlas_test.go b/state/remote/atlas_test.go index 060d79455..f5fe127d6 100644 --- a/state/remote/atlas_test.go +++ b/state/remote/atlas_test.go @@ -5,6 +5,7 @@ import ( "crypto/md5" "crypto/tls" "crypto/x509" + 
"encoding/json" "net/http" "net/http/httptest" "net/url" @@ -161,6 +162,9 @@ func TestAtlasClient_LegitimateConflict(t *testing.T) { t.Fatalf("err: %s", err) } + var buf bytes.Buffer + terraform.WriteState(state, &buf) + // Changing the state but not the serial. Should generate a conflict. state.RootModule().Outputs["drift"] = &terraform.OutputState{ Type: "string", @@ -244,7 +248,10 @@ func (f *fakeAtlas) Server() *httptest.Server { } func (f *fakeAtlas) CurrentState() *terraform.State { - currentState, err := terraform.ReadState(bytes.NewReader(f.state)) + // we read the state manually here, because terraform may alter state + // during read + currentState := &terraform.State{} + err := json.Unmarshal(f.state, currentState) if err != nil { f.t.Fatalf("err: %s", err) } @@ -288,10 +295,15 @@ func (f *fakeAtlas) handler(resp http.ResponseWriter, req *http.Request) { var buf bytes.Buffer buf.ReadFrom(req.Body) sum := md5.Sum(buf.Bytes()) - state, err := terraform.ReadState(&buf) + + // we read the state manually here, because terraform may alter state + // during read + state := &terraform.State{} + err := json.Unmarshal(buf.Bytes(), state) if err != nil { f.t.Fatalf("err: %s", err) } + conflict := f.CurrentSerial() == state.Serial && f.CurrentSum() != sum conflict = conflict || f.alwaysConflict if conflict { @@ -351,7 +363,8 @@ var testStateModuleOrderChange = []byte( var testStateSimple = []byte( `{ "version": 3, - "serial": 1, + "serial": 2, + "lineage": "c00ad9ac-9b35-42fe-846e-b06f0ef877e9", "modules": [ { "path": [ @@ -364,7 +377,8 @@ var testStateSimple = []byte( "value": "bar" } }, - "resources": null + "resources": {}, + "depends_on": [] } ] } diff --git a/state/testing.go b/state/testing.go index 207dda7d7..bf73ba4ff 100644 --- a/state/testing.go +++ b/state/testing.go @@ -1,7 +1,6 @@ package state import ( - "bytes" "reflect" "testing" @@ -29,12 +28,12 @@ func TestState(t *testing.T, s interface{}) { // Check that the initial state is correct if state := reader.State(); !current.Equal(state) { - t.Fatalf("not initial: %#v\n\n%#v", state, current) + t.Fatalf("not initial:\n%#v\n\n%#v", state, current) } // Write a new state and verify that we have it if ws, ok := s.(StateWriter); ok { - current.Modules = append(current.Modules, &terraform.ModuleState{ + current.AddModuleState(&terraform.ModuleState{ Path: []string{"root"}, Outputs: map[string]*terraform.OutputState{ "bar": &terraform.OutputState{ @@ -50,7 +49,7 @@ func TestState(t *testing.T, s interface{}) { } if actual := reader.State(); !actual.Equal(current) { - t.Fatalf("bad: %#v\n\n%#v", actual, current) + t.Fatalf("bad:\n%#v\n\n%#v", actual, current) } } @@ -146,7 +145,7 @@ func TestStateInitial() *terraform.State { }, } - var scratch bytes.Buffer - terraform.WriteState(initial, &scratch) + initial.Init() + return initial } diff --git a/terraform/state.go b/terraform/state.go index 473a8adbc..d6dc17194 100644 --- a/terraform/state.go +++ b/terraform/state.go @@ -70,7 +70,7 @@ type State struct { // Apart from the guarantee that collisions between two lineages // are very unlikely, this value is opaque and external callers // should only compare lineage strings byte-for-byte for equality. - Lineage string `json:"lineage,omitempty"` + Lineage string `json:"lineage"` // Remote is used to track the metadata required to // pull and push state files from a remote storage endpoint. 
@@ -113,7 +113,13 @@ func (s *State) Children(path []string) []*ModuleState { // This should be the preferred method to add module states since it // allows us to optimize lookups later as well as control sorting. func (s *State) AddModule(path []string) *ModuleState { - m := &ModuleState{Path: path} + // check if the module exists first + m := s.ModuleByPath(path) + if m != nil { + return m + } + + m = &ModuleState{Path: path} m.init() s.Modules = append(s.Modules, m) s.sort() @@ -498,6 +504,10 @@ func (s *State) FromFutureTerraform() bool { return SemVersion.LessThan(v) } +func (s *State) Init() { + s.init() +} + func (s *State) init() { if s.Version == 0 { s.Version = StateVersion @@ -506,6 +516,14 @@ func (s *State) init() { s.AddModule(rootModulePath) } s.EnsureHasLineage() + + for _, mod := range s.Modules { + mod.init() + } + + if s.Remote != nil { + s.Remote.init() + } } func (s *State) EnsureHasLineage() { @@ -517,6 +535,21 @@ func (s *State) EnsureHasLineage() { } } +// AddModuleState insert this module state and override any existing ModuleState +func (s *State) AddModuleState(mod *ModuleState) { + mod.init() + + for i, m := range s.Modules { + if reflect.DeepEqual(m.Path, mod.Path) { + s.Modules[i] = mod + return + } + } + + s.Modules = append(s.Modules, mod) + s.sort() +} + // prune is used to remove any resources that are no longer required func (s *State) prune() { if s == nil { @@ -586,6 +619,12 @@ type RemoteState struct { Config map[string]string `json:"config"` } +func (r *RemoteState) init() { + if r.Config == nil { + r.Config = make(map[string]string) + } +} + func (r *RemoteState) deepcopy() *RemoteState { confCopy := make(map[string]string, len(r.Config)) for k, v := range r.Config { @@ -713,7 +752,7 @@ type ModuleState struct { // Terraform. If Terraform doesn't find a matching ID in the // overall state, then it assumes it isn't managed and doesn't // worry about it. - Dependencies []string `json:"depends_on,omitempty"` + Dependencies []string `json:"depends_on"` } // Equal tests whether one module state is equal to another. @@ -817,12 +856,23 @@ func (m *ModuleState) View(id string) *ModuleState { } func (m *ModuleState) init() { + if m.Path == nil { + m.Path = []string{} + } if m.Outputs == nil { m.Outputs = make(map[string]*OutputState) } if m.Resources == nil { m.Resources = make(map[string]*ResourceState) } + + if m.Dependencies == nil { + m.Dependencies = make([]string, 0) + } + + for _, rs := range m.Resources { + rs.init() + } } func (m *ModuleState) deepcopy() *ModuleState { @@ -1095,7 +1145,7 @@ type ResourceState struct { // Terraform. If Terraform doesn't find a matching ID in the // overall state, then it assumes it isn't managed and doesn't // worry about it. - Dependencies []string `json:"depends_on,omitempty"` + Dependencies []string `json:"depends_on"` // Primary is the current active instance for this resource. // It can be replaced but only after a successful creation. @@ -1113,13 +1163,13 @@ type ResourceState struct { // // An instance will remain in the Deposed list until it is successfully // destroyed and purged. - Deposed []*InstanceState `json:"deposed,omitempty"` + Deposed []*InstanceState `json:"deposed"` // Provider is used when a resource is connected to a provider with an alias. // If this string is empty, the resource is connected to the default provider, // e.g. "aws_instance" goes with the "aws" provider. // If the resource block contained a "provider" key, that value will be set here. 
- Provider string `json:"provider,omitempty"` + Provider string `json:"provider"` } // Equal tests whether two ResourceStates are equal. @@ -1171,6 +1221,18 @@ func (r *ResourceState) init() { r.Primary = &InstanceState{} } r.Primary.init() + + if r.Dependencies == nil { + r.Dependencies = []string{} + } + + if r.Deposed == nil { + r.Deposed = make([]*InstanceState, 0) + } + + for _, dep := range r.Deposed { + dep.init() + } } func (r *ResourceState) deepcopy() *ResourceState { @@ -1222,7 +1284,7 @@ type InstanceState struct { // Attributes are basic information about the resource. Any keys here // are accessible in variable format within Terraform configurations: // ${resourcetype.name.attribute}. - Attributes map[string]string `json:"attributes,omitempty"` + Attributes map[string]string `json:"attributes"` // Ephemeral is used to store any state associated with this instance // that is necessary for the Terraform run to complete, but is not @@ -1232,10 +1294,10 @@ type InstanceState struct { // Meta is a simple K/V map that is persisted to the State but otherwise // ignored by Terraform core. It's meant to be used for accounting by // external client code. - Meta map[string]string `json:"meta,omitempty"` + Meta map[string]string `json:"meta"` // Tainted is used to mark a resource for recreation. - Tainted bool `json:"tainted,omitempty"` + Tainted bool `json:"tainted"` } func (i *InstanceState) init() { @@ -1498,6 +1560,7 @@ func ReadState(src io.Reader) (*State, error) { return nil, fmt.Errorf("Terraform %s does not support state version %d, please update.", SemVersion.String(), versionIdentifier.Version) } + } func ReadStateV1(jsonBytes []byte) (*stateV1, error) { @@ -1543,6 +1606,9 @@ func ReadStateV2(jsonBytes []byte) (*State, error) { // Sort it state.sort() + // catch any unitialized fields in the state + state.init() + return state, nil } @@ -1575,6 +1641,23 @@ func ReadStateV3(jsonBytes []byte) (*State, error) { // Sort it state.sort() + // catch any unitialized fields in the state + state.init() + + // Now we write the state back out to detect any changes in normaliztion. + // If our state is now written out differently, bump the serial number to + // prevent conflicts. + var buf bytes.Buffer + err := WriteState(state, &buf) + if err != nil { + return nil, err + } + + if !bytes.Equal(jsonBytes, buf.Bytes()) { + log.Println("[INFO] state modified during read or write. 
incrementing serial number") + state.Serial++ + } + return state, nil } @@ -1583,6 +1666,9 @@ func WriteState(d *State, dst io.Writer) error { // Make sure it is sorted d.sort() + // make sure we have no uninitialized fields + d.init() + // Ensure the version is set d.Version = StateVersion diff --git a/terraform/state_test.go b/terraform/state_test.go index b9e727457..41e5f6bcf 100644 --- a/terraform/state_test.go +++ b/terraform/state_test.go @@ -90,6 +90,7 @@ func TestStateOutputTypeRoundTrip(t *testing.T) { }, }, } + state.init() buf := new(bytes.Buffer) if err := WriteState(state, buf); err != nil { @@ -102,7 +103,8 @@ func TestStateOutputTypeRoundTrip(t *testing.T) { } if !reflect.DeepEqual(state, roundTripped) { - t.Fatalf("bad: %#v", roundTripped) + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", roundTripped) } } @@ -121,6 +123,8 @@ func TestStateModuleOrphans(t *testing.T) { }, } + state.init() + config := testModule(t, "state-module-orphans").Config() actual := state.ModuleOrphans(RootModulePath, config) expected := [][]string{ @@ -144,6 +148,8 @@ func TestStateModuleOrphans_nested(t *testing.T) { }, } + state.init() + actual := state.ModuleOrphans(RootModulePath, nil) expected := [][]string{ []string{RootModuleName, "foo"}, @@ -169,6 +175,8 @@ func TestStateModuleOrphans_nilConfig(t *testing.T) { }, } + state.init() + actual := state.ModuleOrphans(RootModulePath, nil) expected := [][]string{ []string{RootModuleName, "foo"}, @@ -195,6 +203,8 @@ func TestStateModuleOrphans_deepNestedNilConfig(t *testing.T) { }, } + state.init() + actual := state.ModuleOrphans(RootModulePath, nil) expected := [][]string{ []string{RootModuleName, "parent"}, @@ -1279,7 +1289,8 @@ func TestInstanceState_MergeDiff_nilDiff(t *testing.T) { func TestReadWriteState(t *testing.T) { state := &State{ - Serial: 9, + Serial: 9, + Lineage: "5d1ad1a1-4027-4665-a908-dbe6adff11d8", Remote: &RemoteState{ Type: "http", Config: map[string]string{ @@ -1309,6 +1320,7 @@ func TestReadWriteState(t *testing.T) { }, }, } + state.init() buf := new(bytes.Buffer) if err := WriteState(state, buf); err != nil { @@ -1328,9 +1340,11 @@ func TestReadWriteState(t *testing.T) { // ReadState should not restore sensitive information! 
mod := state.RootModule() mod.Resources["foo"].Primary.Ephemeral = EphemeralState{} + mod.Resources["foo"].Primary.Ephemeral.init() if !reflect.DeepEqual(actual, state) { - t.Fatalf("bad: %#v", actual) + t.Logf("expected:\n%#v", state) + t.Fatalf("got:\n%#v", actual) } } diff --git a/terraform/state_upgrade_v1_to_v2.go b/terraform/state_upgrade_v1_to_v2.go index d4bf9f8f0..038615336 100644 --- a/terraform/state_upgrade_v1_to_v2.go +++ b/terraform/state_upgrade_v1_to_v2.go @@ -38,6 +38,7 @@ func upgradeStateV1ToV2(old *stateV1) (*State, error) { } newState.sort() + newState.init() return newState, nil } diff --git a/terraform/upgrade_state_v1_test.go b/terraform/upgrade_state_v1_test.go index ad6d57409..405cba949 100644 --- a/terraform/upgrade_state_v1_test.go +++ b/terraform/upgrade_state_v1_test.go @@ -35,7 +35,8 @@ func TestReadUpgradeStateV1toV3(t *testing.T) { } if !reflect.DeepEqual(actual, roundTripped) { - t.Fatalf("bad: %#v", actual) + t.Logf("actual:\n%#v", actual) + t.Fatalf("roundTripped:\n%#v", roundTripped) } } From ef9f3a45b1804ec34390da500d02417191637648 Mon Sep 17 00:00:00 2001 From: Renier Morales Date: Fri, 12 Aug 2016 12:52:12 -0400 Subject: [PATCH 0692/1238] Add S3 endpoint override ability and expose S3 path style option (#7871) * Overriding S3 endpoint - Enable specifying your own S3 api endpoint to override the default one, under endpoints. * Force S3 path style - Expose this option from the aws-sdk-go configuration to the provider. --- builtin/providers/aws/config.go | 13 +++++++---- builtin/providers/aws/provider.go | 23 +++++++++++++++++++ .../docs/providers/aws/index.html.markdown | 8 +++++++ 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/config.go b/builtin/providers/aws/config.go index c3763f5d3..6a7602ba6 100644 --- a/builtin/providers/aws/config.go +++ b/builtin/providers/aws/config.go @@ -80,6 +80,7 @@ type Config struct { SkipCredsValidation bool SkipRequestingAccountId bool SkipMetadataApiCheck bool + S3ForcePathStyle bool } type AWSClient struct { @@ -163,10 +164,11 @@ func (c *Config) Client() (interface{}, error) { log.Printf("[INFO] AWS Auth provider used: %q", cp.ProviderName) awsConfig := &aws.Config{ - Credentials: creds, - Region: aws.String(c.Region), - MaxRetries: aws.Int(c.MaxRetries), - HTTPClient: cleanhttp.DefaultClient(), + Credentials: creds, + Region: aws.String(c.Region), + MaxRetries: aws.Int(c.MaxRetries), + HTTPClient: cleanhttp.DefaultClient(), + S3ForcePathStyle: aws.Bool(c.S3ForcePathStyle), } if logging.IsDebugOrHigher() { @@ -199,6 +201,7 @@ func (c *Config) Client() (interface{}, error) { awsEc2Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.Ec2Endpoint)}) awsElbSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.ElbEndpoint)}) awsIamSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.IamEndpoint)}) + awsS3Sess := sess.Copy(&aws.Config{Endpoint: aws.String(c.S3Endpoint)}) dynamoSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.DynamoDBEndpoint)}) kinesisSess := sess.Copy(&aws.Config{Endpoint: aws.String(c.KinesisEndpoint)}) @@ -259,7 +262,7 @@ func (c *Config) Client() (interface{}, error) { client.rdsconn = rds.New(sess) client.redshiftconn = redshift.New(sess) client.simpledbconn = simpledb.New(sess) - client.s3conn = s3.New(sess) + client.s3conn = s3.New(awsS3Sess) client.sesConn = ses.New(sess) client.snsconn = sns.New(sess) client.sqsconn = sqs.New(sess) diff --git a/builtin/providers/aws/provider.go b/builtin/providers/aws/provider.go index 438a738da..c4fb0561e 100644 --- 
a/builtin/providers/aws/provider.go +++ b/builtin/providers/aws/provider.go @@ -130,6 +130,13 @@ func Provider() terraform.ResourceProvider { Default: false, Description: descriptions["skip_metadata_api_check"], }, + + "s3_force_path_style": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + Description: descriptions["s3_force_path_style"], + }, }, DataSourcesMap: map[string]*schema.Resource{ @@ -353,6 +360,8 @@ func init() { "elb_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", + "s3_endpoint": "Use this to override the default endpoint URL constructed from the `region`.\n", + "insecure": "Explicitly allow the provider to perform \"insecure\" SSL requests. If omitted," + "default value is `false`", @@ -364,6 +373,11 @@ func init() { "skip_medatadata_api_check": "Skip the AWS Metadata API check. " + "Used for AWS API implementations that do not have a metadata api endpoint.", + + "s3_force_path_style": "Set this to true to force the request to use path-style addressing,\n" + + "i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the S3 client will\n" + + "use virtual hosted bucket addressing when possible\n" + + "(http://BUCKET.s3.amazonaws.com/KEY). Specific to the Amazon S3 service.", } } @@ -382,6 +396,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { SkipCredsValidation: d.Get("skip_credentials_validation").(bool), SkipRequestingAccountId: d.Get("skip_requesting_account_id").(bool), SkipMetadataApiCheck: d.Get("skip_metadata_api_check").(bool), + S3ForcePathStyle: d.Get("s3_force_path_style").(bool), } endpointsSet := d.Get("endpoints").(*schema.Set) @@ -391,6 +406,7 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) { config.IamEndpoint = endpoints["iam"].(string) config.Ec2Endpoint = endpoints["ec2"].(string) config.ElbEndpoint = endpoints["elb"].(string) + config.S3Endpoint = endpoints["s3"].(string) } if v, ok := d.GetOk("allowed_account_ids"); ok { @@ -433,6 +449,12 @@ func endpointsSchema() *schema.Schema { Default: "", Description: descriptions["elb_endpoint"], }, + "s3": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + Description: descriptions["s3_endpoint"], + }, }, }, Set: endpointsToHash, @@ -445,6 +467,7 @@ func endpointsToHash(v interface{}) int { buf.WriteString(fmt.Sprintf("%s-", m["iam"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["ec2"].(string))) buf.WriteString(fmt.Sprintf("%s-", m["elb"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["s3"].(string))) return hashcode.String(buf.String()) } diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown index 90fe99584..853347735 100644 --- a/website/source/docs/providers/aws/index.html.markdown +++ b/website/source/docs/providers/aws/index.html.markdown @@ -182,6 +182,10 @@ The following arguments are supported in the `provider` block: `true` prevents Terraform from authenticating via Metadata API - i.e. you may need to use other auth methods (static credentials set as ENV vars or config) +* `s3_force_path_style` - (Optional) set this to true to force the request to use + path-style adressing, i.e., http://s3.amazonaws.com/BUCKET/KEY. By default, the + S3 client will use virtual hosted bucket addressing when possible + (http://BUCKET.s3.amazonaws.com/KEY). Specific to the Amazon S3 service. 
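
Illustration only, not part of this change: a provider block exercising both new options might look like the following; the endpoint URL and region are placeholders, not values taken from this patch.

```hcl
provider "aws" {
  region              = "us-east-1"
  s3_force_path_style = true

  endpoints {
    # e.g. a local or private S3-compatible endpoint
    s3 = "http://localhost:9000"
  }
}
```
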
Nested `endpoints` block supports the followings: @@ -197,6 +201,10 @@ Nested `endpoints` block supports the followings: URL constructed from the `region`. It's typically used to connect to custom elb endpoints. +* `s3` - (Optional) Use this to override the default endpoint + URL constructed from the `region`. It's typically used to connect to + custom s3 endpoints. + ## Getting the Account ID If you use either `allowed_account_ids` or `forbidden_account_ids`, From 02fee114773c2bea7a88925fa5153aef2667f5d1 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Fri, 12 Aug 2016 17:53:53 +0100 Subject: [PATCH 0693/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9af43ea8c..6ce00755f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -23,6 +23,7 @@ IMPROVEMENTS * provider/aws: Allow skipping credentials validation, requesting Account ID and/or metadata API check [GH-7874] * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated [GH-7794] * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` [GH-8087] + * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options [GH-7871] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From 191d7c100792cc9c64e5332b4e718bce780a6269 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 12 Aug 2016 14:19:42 -0400 Subject: [PATCH 0694/1238] Fix panic when showing empty state Fixes a nil dereference when there's no state to print. Fix some slice declaration to use the recommended style when allocations don't matter. 
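
Illustration only, not part of this patch: a minimal sketch of the two changes described above — return early when there is nothing to show instead of dereferencing a missing instance, and declare append-only slices with `var` rather than preallocating when the capacity doesn't matter. The function and names below are invented for the example.

```go
package main

import (
	"fmt"
	"sort"
)

func showAttributes(attrs map[string]string) []string {
	// Guard: with nothing to show, return early rather than continue
	// and dereference data that isn't there.
	if len(attrs) == 0 {
		return nil
	}

	// Recommended declaration style when the allocation doesn't matter:
	// a nil slice is safe to append to.
	var keys []string
	for k := range attrs {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	var output []string
	for _, k := range keys {
		output = append(output, fmt.Sprintf("%s | %s", k, attrs[k]))
	}
	return output
}

func main() {
	fmt.Println(showAttributes(nil)) // prints: []
	fmt.Println(showAttributes(map[string]string{"id": "bar", "bar": "value"}))
}
```
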
--- command/state_show.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/command/state_show.go b/command/state_show.go index d9a4f1d08..1f5096e63 100644 --- a/command/state_show.go +++ b/command/state_show.go @@ -45,22 +45,27 @@ func (c *StateShowCommand) Run(args []string) int { return 1 } + if len(results) == 0 { + return 0 + } + instance, err := c.filterInstance(results) if err != nil { c.Ui.Error(err.Error()) return 1 } + is := instance.Value.(*terraform.InstanceState) // Sort the keys - keys := make([]string, 0, len(is.Attributes)) + var keys []string for k, _ := range is.Attributes { keys = append(keys, k) } sort.Strings(keys) // Build the output - output := make([]string, 0, len(is.Attributes)+1) + var output []string output = append(output, fmt.Sprintf("id | %s", is.ID)) for _, k := range keys { if k != "id" { From 1be5a650fc60fe30e2c09fd6948492e1619b1c16 Mon Sep 17 00:00:00 2001 From: James Bardin Date: Fri, 12 Aug 2016 15:01:25 -0400 Subject: [PATCH 0695/1238] Add test for showing an empty state Make sure we don't panic if there's no state to print --- command/state_show_test.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/command/state_show_test.go b/command/state_show_test.go index 1e2dba08c..cbcfb46d8 100644 --- a/command/state_show_test.go +++ b/command/state_show_test.go @@ -126,6 +126,29 @@ func TestStateShow_noState(t *testing.T) { } } +func TestStateShow_emptyState(t *testing.T) { + state := &terraform.State{} + + statePath := testStateFile(t, state) + + p := testProvider() + ui := new(cli.MockUi) + c := &StateShowCommand{ + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + args := []string{ + "-state", statePath, + "test_instance.foo", + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } +} + const testStateShowOutput = ` id = bar bar = value From 3dccfa0cc9cd2bd642295cd27d9fb45ff741fe2a Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 12 Aug 2016 17:20:09 -0500 Subject: [PATCH 0696/1238] terraform: diffs with only tainted set are non-empty Fixes issue where a resource marked as tainted with no other attribute diffs would never show up in the plan or apply as needing to be replaced. One unrelated test needed updating due to a quirk in the testDiffFn logic - it adds a "type" field diff if the diff is non-Empty. 
NBD --- helper/schema/schema_test.go | 20 ++++++++++++++++++++ terraform/context_plan_test.go | 3 ++- terraform/diff.go | 2 +- terraform/diff_test.go | 13 +++++++++++++ 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/helper/schema/schema_test.go b/helper/schema/schema_test.go index d9d023d6a..5700aba2a 100644 --- a/helper/schema/schema_test.go +++ b/helper/schema/schema_test.go @@ -2400,6 +2400,26 @@ func TestSchemaMap_Diff(t *testing.T) { Err: false, }, + + "tainted in state w/ no attr changes is still a replacement": { + Schema: map[string]*Schema{}, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "id": "someid", + }, + Tainted: true, + }, + + Config: map[string]interface{}{}, + + Diff: &terraform.InstanceDiff{ + Attributes: map[string]*terraform.ResourceAttrDiff{}, + DestroyTainted: true, + }, + + Err: false, + }, } for tn, tc := range cases { diff --git a/terraform/context_plan_test.go b/terraform/context_plan_test.go index 65ba03566..96a40119a 100644 --- a/terraform/context_plan_test.go +++ b/terraform/context_plan_test.go @@ -1949,6 +1949,7 @@ func TestContext2Plan_taintDestroyInterpolatedCountRace(t *testing.T) { DIFF: DESTROY/CREATE: aws_instance.foo.0 + type: "" => "aws_instance" STATE: @@ -1960,7 +1961,7 @@ aws_instance.foo.2: ID = bar `) if actual != expected { - t.Fatalf("bad:\n%s", actual) + t.Fatalf("[%d] bad:\n%s\nexpected:\n%s\n", i, actual, expected) } } } diff --git a/terraform/diff.go b/terraform/diff.go index f3e7a092f..5de15a24a 100644 --- a/terraform/diff.go +++ b/terraform/diff.go @@ -361,7 +361,7 @@ func (d *InstanceDiff) Empty() bool { d.mu.Lock() defer d.mu.Unlock() - return !d.Destroy && len(d.Attributes) == 0 + return !d.Destroy && !d.DestroyTainted && len(d.Attributes) == 0 } func (d *InstanceDiff) GoString() string { diff --git a/terraform/diff_test.go b/terraform/diff_test.go index bf1c1c9f2..faffcbbcc 100644 --- a/terraform/diff_test.go +++ b/terraform/diff_test.go @@ -27,6 +27,19 @@ func TestDiffEmpty(t *testing.T) { } } +func TestDiffEmpty_taintedIsNotEmpty(t *testing.T) { + diff := new(Diff) + + mod := diff.AddModule(rootModulePath) + mod.Resources["nodeA"] = &InstanceDiff{ + DestroyTainted: true, + } + + if diff.Empty() { + t.Fatal("should not be empty, since DestroyTainted was set") + } +} + func TestModuleDiff_ChangeType(t *testing.T) { cases := []struct { Diff *ModuleDiff From e909bc8ad26eba29d33a4426ec1a0787ab2f9a8e Mon Sep 17 00:00:00 2001 From: Paul Hinze Date: Fri, 12 Aug 2016 18:07:23 -0500 Subject: [PATCH 0697/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6ce00755f..55f4f4ede 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ IMPROVEMENTS * provider/google: Support Import of `google_compute_autoscaler` [GH-8115] BUG FIXES: + * core: Fix issue preventing `taint` from working with resources that had no other attributes in their diff [GH-8167] * provider/aws: guard against missing image_digest in `aws_ecs_task_definition` [GH-7966] * provider/aws: `aws_cloudformation_stack` now respects `timeout_in_minutes` field when waiting for CF API to finish an update operation [GH-7997] * provider/aws: Prevent errors when `aws_s3_bucket` `acceleration_status` is not available in a given region [GH-7999] From 5316739ba1418268387820b028d91817a0e7e6f8 Mon Sep 17 00:00:00 2001 From: Anthony Stanton Date: Fri, 12 Aug 2016 18:57:48 +0200 Subject: [PATCH 0698/1238] Update go-librato --- .../henrikhodne/go-librato/librato/alerts.go | 110 
++++++++++++ .../henrikhodne/go-librato/librato/client.go | 9 +- .../henrikhodne/go-librato/librato/metrics.go | 163 ++++++++++++++++++ .../go-librato/librato/pagination.go | 70 ++++++++ .../go-librato/librato/services.go | 90 ++++++++++ vendor/vendor.json | 5 +- 6 files changed, 444 insertions(+), 3 deletions(-) create mode 100644 vendor/github.com/henrikhodne/go-librato/librato/alerts.go create mode 100644 vendor/github.com/henrikhodne/go-librato/librato/metrics.go create mode 100644 vendor/github.com/henrikhodne/go-librato/librato/pagination.go create mode 100644 vendor/github.com/henrikhodne/go-librato/librato/services.go diff --git a/vendor/github.com/henrikhodne/go-librato/librato/alerts.go b/vendor/github.com/henrikhodne/go-librato/librato/alerts.go new file mode 100644 index 000000000..fd027b906 --- /dev/null +++ b/vendor/github.com/henrikhodne/go-librato/librato/alerts.go @@ -0,0 +1,110 @@ +package librato + +import ( + "fmt" + "net/http" +) + +// AlertsService handles communication with the Librato API methods related to +// alerts. +type AlertsService struct { + client *Client +} + +// Alert represents a Librato Alert. +type Alert struct { + Name *string `json:"name"` + ID *uint `json:"id,omitempty"` + Conditions []AlertCondition `json:"conditions,omitempty"` + // These are interface{} because the Librato API asks for integers + // on Create and returns hashes on Get + Services interface{} `json:"services,omitempty"` + Attributes *AlertAttributes `json:"attributes,omitempty"` + Description *string `json:"description,omitempty"` + Active *bool `json:"active,omitempty"` + RearmSeconds *uint `json:"rearm_seconds,omitempty"` +} + +func (a Alert) String() string { + return Stringify(a) +} + +// AlertCondition represents an alert trigger condition. +type AlertCondition struct { + Type *string `json:"type,omitempty"` + MetricName *string `json:"metric_name,omitempty"` + Source *string `json:"source,omitempty"` + DetectReset *bool `json:"detect_reset,omitempty"` + Threshold *float64 `json:"threshold,omitempty"` + SummaryFunction *string `json:"summary_function,omitempty"` + Duration *uint `json:"duration,omitempty"` +} + +// AlertAttributes represents the attributes of an alert. +type AlertAttributes struct { + RunbookURL *string `json:"runbook_url,omitempty"` +} + +// Get an alert by ID +// +// Librato API docs: https://www.librato.com/docs/api/#retrieve-alert-by-id +func (a *AlertsService) Get(id uint) (*Alert, *http.Response, error) { + urlStr := fmt.Sprintf("alerts/%d", id) + + req, err := a.client.NewRequest("GET", urlStr, nil) + if err != nil { + return nil, nil, err + } + + alert := new(Alert) + resp, err := a.client.Do(req, alert) + if err != nil { + return nil, resp, err + } + + return alert, resp, err +} + +// Create an alert +// +// Librato API docs: https://www.librato.com/docs/api/?shell#create-an-alert +func (a *AlertsService) Create(alert *Alert) (*Alert, *http.Response, error) { + req, err := a.client.NewRequest("POST", "alerts", alert) + if err != nil { + return nil, nil, err + } + + al := new(Alert) + resp, err := a.client.Do(req, al) + if err != nil { + return nil, resp, err + } + + return al, resp, err +} + +// Edit an alert. 
+// +// Librato API docs: https://www.librato.com/docs/api/?shell#update-alert +func (a *AlertsService) Edit(alertID uint, alert *Alert) (*http.Response, error) { + u := fmt.Sprintf("alerts/%d", alertID) + req, err := a.client.NewRequest("PUT", u, alert) + if err != nil { + return nil, err + } + + return a.client.Do(req, nil) +} + +// Delete an alert +// +// Librato API docs: https://www.librato.com/docs/api/?shell#delete-alert +func (a *AlertsService) Delete(id uint) (*http.Response, error) { + u := fmt.Sprintf("alerts/%d", id) + req, err := a.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return a.client.Do(req, nil) +} diff --git a/vendor/github.com/henrikhodne/go-librato/librato/client.go b/vendor/github.com/henrikhodne/go-librato/librato/client.go index 181e2c7f5..5cb240c2d 100644 --- a/vendor/github.com/henrikhodne/go-librato/librato/client.go +++ b/vendor/github.com/henrikhodne/go-librato/librato/client.go @@ -44,7 +44,10 @@ type Client struct { UserAgent string // Services used to manipulate API entities. - Spaces *SpacesService + Spaces *SpacesService + Metrics *MetricsService + Alerts *AlertsService + Services *ServicesService } // NewClient returns a new Librato API client bound to the public Librato API. @@ -74,6 +77,9 @@ func NewClientWithBaseURL(baseURL *url.URL, email, token string) *Client { } c.Spaces = &SpacesService{client: c} + c.Metrics = &MetricsService{client: c} + c.Alerts = &AlertsService{client: c} + c.Services = &ServicesService{client: c} return c } @@ -89,7 +95,6 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ if err != nil { return nil, err } - u := c.BaseURL.ResolveReference(rel) var buf io.ReadWriter diff --git a/vendor/github.com/henrikhodne/go-librato/librato/metrics.go b/vendor/github.com/henrikhodne/go-librato/librato/metrics.go new file mode 100644 index 000000000..974204a29 --- /dev/null +++ b/vendor/github.com/henrikhodne/go-librato/librato/metrics.go @@ -0,0 +1,163 @@ +package librato + +import ( + "fmt" + "net/http" +) + +// MetricsService handles communication with the Librato API methods related to +// metrics. +type MetricsService struct { + client *Client +} + +// Metric represents a Librato Metric. +type Metric struct { + Name *string `json:"name"` + Period *uint `json:"period,omitempty"` + DisplayName *string `json:"display_name,omitempty"` + Attributes *MetricAttributes `json:"attributes,omitempty"` +} + +type MetricAttributes struct { + Color *string `json:"color"` + // These are interface{} because sometimes the Librato API + // returns strings, and sometimes it returns integers + DisplayMax interface{} `json:"display_max"` + DisplayMin interface{} `json:"display_min"` + DisplayUnitsShort string `json:"display_units_short"` + DisplayStacked bool `json:"display_stacked"` + DisplayTransform string `json:"display_transform"` +} + +type ListMetricsOptions struct { + *PaginationMeta + Name string `url:"name,omitempty"` +} + +// Advance to the specified page in result set, while retaining +// the filtering options. +func (l *ListMetricsOptions) AdvancePage(next *PaginationMeta) ListMetricsOptions { + return ListMetricsOptions{ + PaginationMeta: next, + Name: l.Name, + } +} + +type ListMetricsResponse struct { + ThisPage *PaginationResponseMeta + NextPage *PaginationMeta +} + +// List metrics using the provided options. 
+// +// Librato API docs: https://www.librato.com/docs/api/#retrieve-metrics +func (m *MetricsService) List(opts *ListMetricsOptions) ([]Metric, *ListMetricsResponse, error) { + u, err := urlWithOptions("metrics", opts) + if err != nil { + return nil, nil, err + } + + req, err := m.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + var metricsResponse struct { + Query PaginationResponseMeta + Metrics []Metric + } + + _, err = m.client.Do(req, &metricsResponse) + if err != nil { + return nil, nil, err + } + + return metricsResponse.Metrics, + &ListMetricsResponse{ + ThisPage: &metricsResponse.Query, + NextPage: metricsResponse.Query.nextPage(opts.PaginationMeta), + }, + nil +} + +// Get a metric by name +// +// Librato API docs: https://www.librato.com/docs/api/#retrieve-metric-by-name +func (m *MetricsService) Get(name string) (*Metric, *http.Response, error) { + u := fmt.Sprintf("metrics/%s", name) + req, err := m.client.NewRequest("GET", u, nil) + if err != nil { + return nil, nil, err + } + + metric := new(Metric) + resp, err := m.client.Do(req, metric) + if err != nil { + return nil, resp, err + } + + return metric, resp, err +} + +type MeasurementSubmission struct { + MeasureTime *uint `json:"measure_time,omitempty"` + Source *string `json:"source,omitempty"` + Gauges []*GaugeMeasurement `json:"gauges,omitempty"` + Counters []*Measurement `json:"counters,omitempty"` +} + +type Measurement struct { + Name string `json:"name"` + Value *float64 `json:"value,omitempty"` + MeasureTime *uint `json:"measure_time,omitempty"` + Source *string `json:"source,omitempty"` +} + +type GaugeMeasurement struct { + *Measurement + Count *uint `json:"count,omitempty"` + Sum *float64 `json:"sum,omitempty"` + Max *float64 `json:"max,omitempty"` + Min *float64 `json:"min,omitempty"` + SumSquares *float64 `json:"sum_squares,omitempty"` +} + +// Submit metrics +// +// Librato API docs: https://www.librato.com/docs/api/#submit-metrics +func (m *MetricsService) Submit(measurements *MeasurementSubmission) (*http.Response, error) { + req, err := m.client.NewRequest("POST", "/metrics", measurements) + if err != nil { + return nil, err + } + + return m.client.Do(req, nil) +} + +// Edit a metric. +// +// Librato API docs: https://www.librato.com/docs/api/#update-metric-by-name +func (m *MetricsService) Edit(metric *Metric) (*http.Response, error) { + u := fmt.Sprintf("metrics/%s", *metric.Name) + + req, err := m.client.NewRequest("PUT", u, metric) + if err != nil { + return nil, err + } + + return m.client.Do(req, nil) +} + +// Delete a metric. +// +// Librato API docs: https://www.librato.com/docs/api/#delete-metric-by-name +func (m *MetricsService) Delete(name string) (*http.Response, error) { + u := fmt.Sprintf("metrics/%s", name) + req, err := m.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return m.client.Do(req, nil) +} diff --git a/vendor/github.com/henrikhodne/go-librato/librato/pagination.go b/vendor/github.com/henrikhodne/go-librato/librato/pagination.go new file mode 100644 index 000000000..5b6934eeb --- /dev/null +++ b/vendor/github.com/henrikhodne/go-librato/librato/pagination.go @@ -0,0 +1,70 @@ +package librato + +import ( + "fmt" + "net/url" +) + +// PaginationResponseMeta contains pagination metadata from Librato API +// responses. 
+type PaginationResponseMeta struct { + Offset uint `json:"offset"` + Length uint `json:"length"` + Total uint `json:"total"` + Found uint `json:"found"` +} + +// Calculate the pagination metadata for the next page of the result set. +// Takes the metadata used to request the current page so that it can use the +// same sort/orderby options +func (p *PaginationResponseMeta) nextPage(originalQuery *PaginationMeta) (next *PaginationMeta) { + nextOffset := p.Offset + p.Length + + if nextOffset >= p.Found { + return nil + } + + next = &PaginationMeta{} + next.Offset = nextOffset + next.Length = p.Length + + if originalQuery != nil { + next.OrderBy = originalQuery.OrderBy + next.Sort = originalQuery.Sort + } + + return next +} + +// PaginationMeta contains metadata that the Librato API requires for pagination +// http://dev.librato.com/v1/pagination +type PaginationMeta struct { + Offset uint `url:"offset,omitempty"` + Length uint `url:"length,omitempty"` + OrderBy string `url:"orderby,omitempty"` + Sort string `url:"sort,omitempty"` +} + +// EncodeValues is implemented to allow other strucs to embed PaginationMeta and +// still use github.com/google/go-querystring/query to encode the struct. It +// makes PaginationMeta implement query.Encoder. +func (m *PaginationMeta) EncodeValues(name string, values *url.Values) error { + if m == nil { + return nil + } + + if m.Offset != 0 { + values.Set("offset", fmt.Sprintf("%d", m.Offset)) + } + if m.Length != 0 { + values.Set("length", fmt.Sprintf("%d", m.Length)) + } + if m.OrderBy != "" { + values.Set("orderby", m.OrderBy) + } + if m.Sort != "" { + values.Set("sort", m.Sort) + } + + return nil +} diff --git a/vendor/github.com/henrikhodne/go-librato/librato/services.go b/vendor/github.com/henrikhodne/go-librato/librato/services.go new file mode 100644 index 000000000..6d15d1e21 --- /dev/null +++ b/vendor/github.com/henrikhodne/go-librato/librato/services.go @@ -0,0 +1,90 @@ +package librato + +import ( + "fmt" + "net/http" +) + +// ServicesService handles communication with the Librato API methods related to +// notification services. +type ServicesService struct { + client *Client +} + +// Service represents a Librato Service. +type Service struct { + ID *uint `json:"id,omitempty"` + Type *string `json:"type,omitempty"` + Title *string `json:"title,omitempty"` + // This is an interface{} because it's a hash of settings + // specific to each service. + Settings map[string]string `json:"settings,omitempty"` +} + +func (a Service) String() string { + return Stringify(a) +} + +// Get a service by ID +// +// Librato API docs: https://www.librato.com/docs/api/#retrieve-specific-service +func (s *ServicesService) Get(id uint) (*Service, *http.Response, error) { + urlStr := fmt.Sprintf("services/%d", id) + + req, err := s.client.NewRequest("GET", urlStr, nil) + if err != nil { + return nil, nil, err + } + + service := new(Service) + resp, err := s.client.Do(req, service) + if err != nil { + return nil, resp, err + } + + return service, resp, err +} + +// Create a service +// +// Librato API docs: https://www.librato.com/docs/api/#create-a-service +func (s *ServicesService) Create(service *Service) (*Service, *http.Response, error) { + req, err := s.client.NewRequest("POST", "services", service) + if err != nil { + return nil, nil, err + } + + sv := new(Service) + resp, err := s.client.Do(req, sv) + if err != nil { + return nil, resp, err + } + + return sv, resp, err +} + +// Edit a service. 
+// +// Librato API docs: https://www.librato.com/docs/api/#update-a-service +func (s *ServicesService) Edit(serviceID uint, service *Service) (*http.Response, error) { + u := fmt.Sprintf("services/%d", serviceID) + req, err := s.client.NewRequest("PUT", u, service) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} + +// Delete a service +// +// Librato API docs: https://www.librato.com/docs/api/#delete-a-service +func (s *ServicesService) Delete(id uint) (*http.Response, error) { + u := fmt.Sprintf("services/%d", id) + req, err := s.client.NewRequest("DELETE", u, nil) + if err != nil { + return nil, err + } + + return s.client.Do(req, nil) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index ee31a419f..2850550dd 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1215,8 +1215,11 @@ "revision": "df949784da9ed028ee76df44652e42d37a09d7e4" }, { + "checksumSHA1": "jq2E42bB0kwKaerHXwJslUea4eM=", + "origin": "github.com/hashicorp/terraform/vendor/github.com/henrikhodne/go-librato/librato", "path": "github.com/henrikhodne/go-librato/librato", - "revision": "613abdebf4922c4d9d46bcb4bcf14ee18c08d7de" + "revision": "6e9aa4b1a8a8b735ad14b4f1c9542ef183e82dc2", + "revisionTime": "2016-08-11T07:26:26Z" }, { "comment": "v0.0.2-37-g5cd82f0", From cb6607953854c06f369801aa326209a35842c3a9 Mon Sep 17 00:00:00 2001 From: Anthony Stanton Date: Fri, 5 Aug 2016 23:16:32 +0200 Subject: [PATCH 0699/1238] Support for Librato Alerts and Services --- builtin/providers/librato/provider.go | 2 + .../librato/resource_librato_alert.go | 446 ++++++++++++++++++ .../librato/resource_librato_alert_test.go | 191 ++++++++ .../librato/resource_librato_service.go | 229 +++++++++ .../librato/resource_librato_service_test.go | 153 ++++++ .../providers/librato/r/alert.html.markdown | 66 +++ .../providers/librato/r/service.html.markdown | 44 ++ 7 files changed, 1131 insertions(+) create mode 100644 builtin/providers/librato/resource_librato_alert.go create mode 100644 builtin/providers/librato/resource_librato_alert_test.go create mode 100644 builtin/providers/librato/resource_librato_service.go create mode 100644 builtin/providers/librato/resource_librato_service_test.go create mode 100644 website/source/docs/providers/librato/r/alert.html.markdown create mode 100644 website/source/docs/providers/librato/r/service.html.markdown diff --git a/builtin/providers/librato/provider.go b/builtin/providers/librato/provider.go index 0b7894f6f..7b1a64016 100644 --- a/builtin/providers/librato/provider.go +++ b/builtin/providers/librato/provider.go @@ -28,6 +28,8 @@ func Provider() terraform.ResourceProvider { ResourcesMap: map[string]*schema.Resource{ "librato_space": resourceLibratoSpace(), "librato_space_chart": resourceLibratoSpaceChart(), + "librato_alert": resourceLibratoAlert(), + "librato_service": resourceLibratoService(), }, ConfigureFunc: providerConfigure, diff --git a/builtin/providers/librato/resource_librato_alert.go b/builtin/providers/librato/resource_librato_alert.go new file mode 100644 index 000000000..2d08be194 --- /dev/null +++ b/builtin/providers/librato/resource_librato_alert.go @@ -0,0 +1,446 @@ +package librato + +import ( + "bytes" + "fmt" + "log" + "math" + "strconv" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/henrikhodne/go-librato/librato" +) + +func resourceLibratoAlert() *schema.Resource { + return &schema.Resource{ + Create: 
resourceLibratoAlertCreate, + Read: resourceLibratoAlertRead, + Update: resourceLibratoAlertUpdate, + Delete: resourceLibratoAlertDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "id": &schema.Schema{ + Type: schema.TypeInt, + Computed: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "active": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "rearm_seconds": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 600, + }, + "services": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: schema.HashString, + }, + "condition": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "metric_name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "source": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "detect_reset": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + "duration": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "threshold": &schema.Schema{ + Type: schema.TypeFloat, + Optional: true, + }, + "summary_function": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + Set: resourceLibratoAlertConditionsHash, + }, + "attributes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "runbook_url": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + // TODO add missing condition attrs + }, + } +} + +func resourceLibratoAlertConditionsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["type"].(string))) + buf.WriteString(fmt.Sprintf("%f-", m["threshold"].(float64))) + buf.WriteString(fmt.Sprintf("%s-", m["metric_name"].(string))) + + return hashcode.String(buf.String()) +} + +func resourceLibratoAlertCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*librato.Client) + + alert := new(librato.Alert) + if v, ok := d.GetOk("name"); ok { + alert.Name = librato.String(v.(string)) + } + if v, ok := d.GetOk("description"); ok { + alert.Description = librato.String(v.(string)) + } + // GetOK returns not OK for false boolean values, use Get + alert.Active = librato.Bool(d.Get("active").(bool)) + if v, ok := d.GetOk("rearm_seconds"); ok { + alert.RearmSeconds = librato.Uint(uint(v.(int))) + } + if v, ok := d.GetOk("services"); ok { + vs := v.(*schema.Set) + services := make([]*string, vs.Len()) + for i, serviceData := range vs.List() { + services[i] = librato.String(serviceData.(string)) + } + alert.Services = services + } + if v, ok := d.GetOk("condition"); ok { + vs := v.(*schema.Set) + conditions := make([]librato.AlertCondition, vs.Len()) + for i, conditionDataM := range vs.List() { + conditionData := conditionDataM.(map[string]interface{}) + var condition librato.AlertCondition + if v, ok := conditionData["type"].(string); ok && v != "" { + condition.Type = librato.String(v) + } + if v, ok := conditionData["threshold"].(float64); ok && !math.IsNaN(v) { + condition.Threshold = librato.Float(v) + } + if v, ok := conditionData["metric_name"].(string); ok && v != "" { + condition.MetricName = librato.String(v) + } + if 
v, ok := conditionData["source"].(string); ok && v != "" { + condition.Source = librato.String(v) + } + if v, ok := conditionData["detect_reset"].(bool); ok { + condition.DetectReset = librato.Bool(v) + } + if v, ok := conditionData["duration"].(uint); ok { + condition.Duration = librato.Uint(v) + } + if v, ok := conditionData["summary_function"].(string); ok && v != "" { + condition.SummaryFunction = librato.String(v) + } + conditions[i] = condition + } + alert.Conditions = conditions + } + if v, ok := d.GetOk("attributes"); ok { + attributeData := v.([]interface{}) + if len(attributeData) > 1 { + return fmt.Errorf("Only one set of attributes per alert is supported") + } else if len(attributeData) == 1 { + if attributeData[0] == nil { + return fmt.Errorf("No attributes found in attributes block") + } + attributeDataMap := attributeData[0].(map[string]interface{}) + attributes := new(librato.AlertAttributes) + if v, ok := attributeDataMap["runbook_url"].(string); ok && v != "" { + attributes.RunbookURL = librato.String(v) + } + alert.Attributes = attributes + } + } + + alertResult, _, err := client.Alerts.Create(alert) + + if err != nil { + return fmt.Errorf("Error creating Librato alert %s: %s", *alert.Name, err) + } + + resource.Retry(1*time.Minute, func() *resource.RetryError { + _, _, err := client.Alerts.Get(*alertResult.ID) + if err != nil { + if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { + return resource.RetryableError(err) + } + return resource.NonRetryableError(err) + } + return nil + }) + + return resourceLibratoAlertReadResult(d, alertResult) +} + +func resourceLibratoAlertRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*librato.Client) + id, err := strconv.ParseUint(d.Id(), 10, 0) + if err != nil { + return err + } + + alert, _, err := client.Alerts.Get(uint(id)) + if err != nil { + if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error reading Librato Alert %s: %s", d.Id(), err) + } + + return resourceLibratoAlertReadResult(d, alert) +} + +func resourceLibratoAlertReadResult(d *schema.ResourceData, alert *librato.Alert) error { + d.SetId(strconv.FormatUint(uint64(*alert.ID), 10)) + if alert.ID != nil { + if err := d.Set("id", *alert.ID); err != nil { + return err + } + } + if alert.Name != nil { + if err := d.Set("name", *alert.Name); err != nil { + return err + } + } + if alert.Description != nil { + if err := d.Set("description", *alert.Description); err != nil { + return err + } + } + if alert.Active != nil { + if err := d.Set("active", *alert.Active); err != nil { + return err + } + } + if alert.RearmSeconds != nil { + if err := d.Set("rearm_seconds", *alert.RearmSeconds); err != nil { + return err + } + } + if alert.Services != nil { + services := resourceLibratoAlertServicesGather(d, alert.Services.([]interface{})) + if err := d.Set("services", services); err != nil { + return err + } + } + if alert.Conditions != nil { + conditions := resourceLibratoAlertConditionsGather(d, alert.Conditions) + if err := d.Set("condition", conditions); err != nil { + return err + } + } + if alert.Attributes != nil { + attributes := resourceLibratoAlertAttributesGather(d, alert.Attributes) + if err := d.Set("attributes", attributes); err != nil { + return err + } + } + + return nil +} + +func resourceLibratoAlertServicesGather(d *schema.ResourceData, services []interface{}) []string { + retServices := make([]string, 0, 
len(services)) + + for _, s := range services { + serviceData := s.(map[string]interface{}) + // ID field is returned as float64, for whatever reason + retServices = append(retServices, fmt.Sprintf("%.f", serviceData["id"])) + } + + return retServices +} + +func resourceLibratoAlertConditionsGather(d *schema.ResourceData, conditions []librato.AlertCondition) []map[string]interface{} { + retConditions := make([]map[string]interface{}, 0, len(conditions)) + for _, c := range conditions { + condition := make(map[string]interface{}) + if c.Type != nil { + condition["type"] = *c.Type + } + if c.Threshold != nil { + condition["threshold"] = *c.Threshold + } + if c.MetricName != nil { + condition["metric_name"] = *c.MetricName + } + if c.Source != nil { + condition["source"] = *c.Source + } + if c.DetectReset != nil { + condition["detect_reset"] = *c.MetricName + } + if c.Duration != nil { + condition["duration"] = *c.Duration + } + if c.SummaryFunction != nil { + condition["summary_function"] = *c.SummaryFunction + } + retConditions = append(retConditions, condition) + } + + return retConditions +} + +// Flattens an attributes hash into something that flatmap.Flatten() can handle +func resourceLibratoAlertAttributesGather(d *schema.ResourceData, attributes *librato.AlertAttributes) []map[string]interface{} { + result := make([]map[string]interface{}, 0, 1) + + if attributes != nil { + retAttributes := make(map[string]interface{}) + if attributes.RunbookURL != nil { + retAttributes["runbook_url"] = *attributes.RunbookURL + } + result = append(result, retAttributes) + } + + return result +} + +func resourceLibratoAlertUpdate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*librato.Client) + + alertID, err := strconv.ParseUint(d.Id(), 10, 0) + if err != nil { + return err + } + + alert := new(librato.Alert) + if d.HasChange("name") { + alert.Name = librato.String(d.Get("name").(string)) + } + if d.HasChange("description") { + alert.Description = librato.String(d.Get("description").(string)) + } + if d.HasChange("active") { + alert.Active = librato.Bool(d.Get("active").(bool)) + } + if d.HasChange("rearm_seconds") { + alert.RearmSeconds = librato.Uint(d.Get("rearm_seconds").(uint)) + } + if d.HasChange("services") { + vs := d.Get("services").(*schema.Set) + services := make([]*string, vs.Len()) + for i, serviceData := range vs.List() { + services[i] = librato.String(serviceData.(string)) + } + alert.Services = services + } + if d.HasChange("condition") { + vs := d.Get("condition").(*schema.Set) + conditions := make([]librato.AlertCondition, vs.Len()) + for i, conditionDataM := range vs.List() { + conditionData := conditionDataM.(map[string]interface{}) + var condition librato.AlertCondition + if v, ok := conditionData["type"].(string); ok && v != "" { + condition.Type = librato.String(v) + } + if v, ok := conditionData["threshold"].(float64); ok && !math.IsNaN(v) { + condition.Threshold = librato.Float(v) + } + if v, ok := conditionData["metric_name"].(string); ok && v != "" { + condition.MetricName = librato.String(v) + } + if v, ok := conditionData["source"].(string); ok && v != "" { + condition.Source = librato.String(v) + } + if v, ok := conditionData["detect_reset"].(bool); ok { + condition.DetectReset = librato.Bool(v) + } + if v, ok := conditionData["duration"].(uint); ok { + condition.Duration = librato.Uint(v) + } + if v, ok := conditionData["summary_function"].(string); ok && v != "" { + condition.SummaryFunction = librato.String(v) + } + conditions[i] = condition + } + 
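+			// The condition list is rebuilt in full from the configuration and
+			// assigned to the update payload as a whole; individual conditions
+			// are not patched in place.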
alert.Conditions = conditions + } + if d.HasChange("attributes") { + attributeData := d.Get("attributes").([]interface{}) + if len(attributeData) > 1 { + return fmt.Errorf("Only one set of attributes per alert is supported") + } else if len(attributeData) == 1 { + if attributeData[0] == nil { + return fmt.Errorf("No attributes found in attributes block") + } + attributeDataMap := attributeData[0].(map[string]interface{}) + attributes := new(librato.AlertAttributes) + if v, ok := attributeDataMap["runbook_url"].(string); ok && v != "" { + attributes.RunbookURL = librato.String(v) + } + alert.Attributes = attributes + } + } + + _, err = client.Alerts.Edit(uint(alertID), alert) + if err != nil { + return fmt.Errorf("Error updating Librato alert: %s", err) + } + + return resourceLibratoAlertRead(d, meta) +} + +func resourceLibratoAlertDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*librato.Client) + id, err := strconv.ParseUint(d.Id(), 10, 0) + if err != nil { + return err + } + + log.Printf("[INFO] Deleting Alert: %d", id) + _, err = client.Alerts.Delete(uint(id)) + if err != nil { + return fmt.Errorf("Error deleting Alert: %s", err) + } + + resource.Retry(1*time.Minute, func() *resource.RetryError { + _, _, err := client.Alerts.Get(uint(id)) + if err != nil { + if errResp, ok := err.(*librato.ErrorResponse); ok && errResp.Response.StatusCode == 404 { + return nil + } + return resource.NonRetryableError(err) + } + return resource.RetryableError(fmt.Errorf("alert still exists")) + }) + + d.SetId("") + return nil +} diff --git a/builtin/providers/librato/resource_librato_alert_test.go b/builtin/providers/librato/resource_librato_alert_test.go new file mode 100644 index 000000000..0acf3a646 --- /dev/null +++ b/builtin/providers/librato/resource_librato_alert_test.go @@ -0,0 +1,191 @@ +package librato + +import ( + "fmt" + "strconv" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/henrikhodne/go-librato/librato" +) + +func TestAccLibratoAlert_Basic(t *testing.T) { + var alert librato.Alert + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLibratoAlertDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckLibratoAlertConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), + testAccCheckLibratoAlertName(&alert, "FooBar"), + resource.TestCheckResourceAttr( + "librato_alert.foobar", "name", "FooBar"), + ), + }, + }, + }) +} + +func TestAccLibratoAlert_Full(t *testing.T) { + var alert librato.Alert + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLibratoAlertDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccCheckLibratoAlertConfig_full, + Check: resource.ComposeTestCheckFunc( + testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), + testAccCheckLibratoAlertName(&alert, "FooBar"), + resource.TestCheckResourceAttr( + "librato_alert.foobar", "name", "FooBar"), + ), + }, + }, + }) +} + +func TestAccLibratoAlert_Updated(t *testing.T) { + var alert librato.Alert + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLibratoAlertDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccCheckLibratoAlertConfig_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), + testAccCheckLibratoAlertName(&alert, "FooBar"), + resource.TestCheckResourceAttr( + "librato_alert.foobar", "name", "FooBar"), + ), + }, + resource.TestStep{ + Config: testAccCheckLibratoAlertConfig_new_value, + Check: resource.ComposeTestCheckFunc( + testAccCheckLibratoAlertExists("librato_alert.foobar", &alert), + testAccCheckLibratoAlertName(&alert, "BarBaz"), + resource.TestCheckResourceAttr( + "librato_alert.foobar", "name", "BarBaz"), + ), + }, + }, + }) +} + +func testAccCheckLibratoAlertDestroy(s *terraform.State) error { + client := testAccProvider.Meta().(*librato.Client) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "librato_alert" { + continue + } + + id, err := strconv.ParseUint(rs.Primary.ID, 10, 0) + if err != nil { + return fmt.Errorf("ID not a number") + } + + _, _, err = client.Alerts.Get(uint(id)) + + if err == nil { + return fmt.Errorf("Alert still exists") + } + } + + return nil +} + +func testAccCheckLibratoAlertName(alert *librato.Alert, name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + + if alert.Name == nil || *alert.Name != name { + return fmt.Errorf("Bad name: %s", *alert.Name) + } + + return nil + } +} + +func testAccCheckLibratoAlertExists(n string, alert *librato.Alert) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No Alert ID is set") + } + + client := testAccProvider.Meta().(*librato.Client) + + id, err := strconv.ParseUint(rs.Primary.ID, 10, 0) + if err != nil { + return fmt.Errorf("ID not a number") + } + + foundAlert, _, err := client.Alerts.Get(uint(id)) + + if err != nil { + return err + } + + if foundAlert.ID == nil || *foundAlert.ID != uint(id) { + return fmt.Errorf("Alert not found") + } + + *alert = *foundAlert + + return nil + } +} + +const testAccCheckLibratoAlertConfig_basic = ` +resource "librato_alert" "foobar" { + name = "FooBar" + description = "A Test Alert" +}` + +const testAccCheckLibratoAlertConfig_new_value = ` +resource "librato_alert" "foobar" { + name = "BarBaz" + description = "A Test Alert" +}` + +const testAccCheckLibratoAlertConfig_full = ` +resource "librato_service" "foobar" { + title = "Foo Bar" + type = "mail" + settings = < Date: Sat, 13 Aug 2016 19:37:46 +0100 Subject: [PATCH 0700/1238] provider/azurerm: create virtual_network_peering resource (#8168) TF_ACC=1 go test ./builtin/providers/azurerm -v -run TestAccAzureRMVirtualNetworkPeering -timeout 120m === RUN TestAccAzureRMVirtualNetworkPeering_importBasic --- PASS: TestAccAzureRMVirtualNetworkPeering_importBasic (225.50s) === RUN TestAccAzureRMVirtualNetworkPeering_basic --- PASS: TestAccAzureRMVirtualNetworkPeering_basic (216.95s) === RUN TestAccAzureRMVirtualNetworkPeering_update --- PASS: TestAccAzureRMVirtualNetworkPeering_update (266.97s) PASS ok github.com/hashicorp/terraform/builtin/providers/azurerm 709.545s --- builtin/providers/azurerm/config.go | 7 + ...import_arm_virtual_network_peering_test.go | 34 +++ builtin/providers/azurerm/provider.go | 1 + .../resource_arm_virtual_network_peering.go | 182 +++++++++++++++ ...source_arm_virtual_network_peering_test.go | 212 ++++++++++++++++++ .../r/virtual_network_peering.html.markdown | 102 +++++++++ website/source/layouts/azurerm.erb | 4 + 7 files changed, 542 
insertions(+) create mode 100644 builtin/providers/azurerm/import_arm_virtual_network_peering_test.go create mode 100644 builtin/providers/azurerm/resource_arm_virtual_network_peering.go create mode 100644 builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go create mode 100644 website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index b85166d76..04d6403c8 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -45,6 +45,7 @@ type ArmClient struct { vnetGatewayConnectionsClient network.VirtualNetworkGatewayConnectionsClient vnetGatewayClient network.VirtualNetworkGatewaysClient vnetClient network.VirtualNetworksClient + vnetPeeringsClient network.VirtualNetworkPeeringsClient routeTablesClient network.RouteTablesClient routesClient network.RoutesClient @@ -257,6 +258,12 @@ func (c *Config) getArmClient() (*ArmClient, error) { vnc.Sender = autorest.CreateSender(withRequestLogging()) client.vnetClient = vnc + vnpc := network.NewVirtualNetworkPeeringsClient(c.SubscriptionID) + setUserAgent(&vnpc.Client) + vnpc.Authorizer = spt + vnpc.Sender = autorest.CreateSender(withRequestLogging()) + client.vnetPeeringsClient = vnpc + rtc := network.NewRouteTablesClient(c.SubscriptionID) setUserAgent(&rtc.Client) rtc.Authorizer = spt diff --git a/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go b/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go new file mode 100644 index 000000000..8269294f8 --- /dev/null +++ b/builtin/providers/azurerm/import_arm_virtual_network_peering_test.go @@ -0,0 +1,34 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccAzureRMVirtualNetworkPeering_importBasic(t *testing.T) { + resourceName := "azurerm_virtual_network_peering.test1" + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"resource_group_name"}, + }, + }, + }) +} diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go index dd85e7bfb..e235fe7a6 100644 --- a/builtin/providers/azurerm/provider.go +++ b/builtin/providers/azurerm/provider.go @@ -68,6 +68,7 @@ func Provider() terraform.ResourceProvider { "azurerm_virtual_machine": resourceArmVirtualMachine(), "azurerm_virtual_machine_scale_set": resourceArmVirtualMachineScaleSet(), "azurerm_virtual_network": resourceArmVirtualNetwork(), + "azurerm_virtual_network_peering": resourceArmVirtualNetworkPeering(), // These resources use the Riviera SDK "azurerm_dns_a_record": resourceArmDnsARecord(), diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_peering.go b/builtin/providers/azurerm/resource_arm_virtual_network_peering.go new file mode 100644 index 000000000..854ceeae0 --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_virtual_network_peering.go @@ -0,0 +1,182 @@ +package azurerm + +import ( + "fmt" + "log" + "net/http" + "sync" + + 
"github.com/Azure/azure-sdk-for-go/arm/network" + "github.com/hashicorp/terraform/helper/schema" +) + +// peerMutex is used to prevet multiple Peering resources being creaed, updated +// or deleted at the same time +var peerMutex = &sync.Mutex{} + +func resourceArmVirtualNetworkPeering() *schema.Resource { + return &schema.Resource{ + Create: resourceArmVirtualNetworkPeeringCreate, + Read: resourceArmVirtualNetworkPeeringRead, + Update: resourceArmVirtualNetworkPeeringCreate, + Delete: resourceArmVirtualNetworkPeeringDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "resource_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "virtual_network_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "remote_virtual_network_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "allow_virtual_network_access": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "allow_forwarded_traffic": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "allow_gateway_transit": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + + "use_remote_gateways": { + Type: schema.TypeBool, + Optional: true, + Computed: true, + }, + }, + } +} + +func resourceArmVirtualNetworkPeeringCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).vnetPeeringsClient + + log.Printf("[INFO] preparing arguments for Azure ARM virtual network peering creation.") + + name := d.Get("name").(string) + vnetName := d.Get("virtual_network_name").(string) + resGroup := d.Get("resource_group_name").(string) + + peer := network.VirtualNetworkPeering{ + Name: &name, + Properties: getVirtualNetworkPeeringProperties(d), + } + + peerMutex.Lock() + defer peerMutex.Unlock() + + _, err := client.CreateOrUpdate(resGroup, vnetName, name, peer, make(chan struct{})) + if err != nil { + return err + } + + read, err := client.Get(resGroup, vnetName, name) + if err != nil { + return err + } + if read.ID == nil { + return fmt.Errorf("Cannot read Virtual Network Peering %s (resource group %s) ID", name, resGroup) + } + + d.SetId(*read.ID) + + return resourceArmVirtualNetworkPeeringRead(d, meta) +} + +func resourceArmVirtualNetworkPeeringRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient).vnetPeeringsClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + vnetName := id.Path["virtualNetworks"] + name := id.Path["virtualNetworkPeerings"] + + resp, err := client.Get(resGroup, vnetName, name) + if resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error making Read request on Azure virtual network peering %s: %s", name, err) + } + peer := *resp.Properties + + // update appropriate values + d.Set("name", resp.Name) + d.Set("virtual_network_name", vnetName) + d.Set("allow_virtual_network_access", peer.AllowVirtualNetworkAccess) + d.Set("allow_forwarded_traffic", peer.AllowForwardedTraffic) + d.Set("allow_gateway_transit", peer.AllowGatewayTransit) + d.Set("use_remote_gateways", peer.UseRemoteGateways) + d.Set("remote_virtual_network_id", peer.RemoteVirtualNetwork.ID) + + return nil +} + +func resourceArmVirtualNetworkPeeringDelete(d *schema.ResourceData, meta interface{}) error { 
+ client := meta.(*ArmClient).vnetPeeringsClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + vnetName := id.Path["virtualNetworks"] + name := id.Path["virtualNetworkPeerings"] + + peerMutex.Lock() + defer peerMutex.Unlock() + + _, err = client.Delete(resGroup, vnetName, name, make(chan struct{})) + + return err +} + +func getVirtualNetworkPeeringProperties(d *schema.ResourceData) *network.VirtualNetworkPeeringPropertiesFormat { + allowVirtualNetworkAccess := d.Get("allow_virtual_network_access").(bool) + allowForwardedTraffic := d.Get("allow_forwarded_traffic").(bool) + allowGatewayTransit := d.Get("allow_gateway_transit").(bool) + useRemoteGateways := d.Get("use_remote_gateways").(bool) + remoteVirtualNetworkID := d.Get("remote_virtual_network_id").(string) + + return &network.VirtualNetworkPeeringPropertiesFormat{ + AllowVirtualNetworkAccess: &allowVirtualNetworkAccess, + AllowForwardedTraffic: &allowForwardedTraffic, + AllowGatewayTransit: &allowGatewayTransit, + UseRemoteGateways: &useRemoteGateways, + RemoteVirtualNetwork: &network.SubResource{ + ID: &remoteVirtualNetworkID, + }, + } +} diff --git a/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go b/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go new file mode 100644 index 000000000..08a48ecd7 --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_virtual_network_peering_test.go @@ -0,0 +1,212 @@ +package azurerm + +import ( + "fmt" + "testing" + + "github.com/Azure/azure-sdk-for-go/core/http" + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAzureRMVirtualNetworkPeering_basic(t *testing.T) { + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), + ), + }, + }, + }) +} + +func TestAccAzureRMVirtualNetworkPeering_update(t *testing.T) { + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basic, ri, ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccAzureRMVirtualNetworkPeering_basicUpdate, ri, ri, ri, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMVirtualNetworkPeeringDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), + resource.TestCheckResourceAttr( + 
"azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test1", "allow_forwarded_traffic", "false"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test2", "allow_forwarded_traffic", "false"), + ), + }, + + resource.TestStep{ + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test1"), + testCheckAzureRMVirtualNetworkPeeringExists("azurerm_virtual_network_peering.test2"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test1", "allow_virtual_network_access", "true"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test2", "allow_virtual_network_access", "true"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test1", "allow_forwarded_traffic", "true"), + resource.TestCheckResourceAttr( + "azurerm_virtual_network_peering.test2", "allow_forwarded_traffic", "true"), + ), + }, + }, + }) +} + +func testCheckAzureRMVirtualNetworkPeeringExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + name := rs.Primary.Attributes["name"] + vnetName := rs.Primary.Attributes["virtual_network_name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for virtual network peering: %s", name) + } + + // Ensure resource group/virtual network peering combination exists in API + conn := testAccProvider.Meta().(*ArmClient).vnetPeeringsClient + + resp, err := conn.Get(resourceGroup, vnetName, name) + if err != nil { + return fmt.Errorf("Bad: Get on vnetPeeringsClient: %s", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Virtual Network Peering %q (resource group: %q) does not exist", name, resourceGroup) + } + + return nil + } +} + +func testCheckAzureRMVirtualNetworkPeeringDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).vnetPeeringsClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_virtual_network_peering" { + continue + } + + name := rs.Primary.Attributes["name"] + vnetName := rs.Primary.Attributes["virtual_network_name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(resourceGroup, vnetName, name) + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Virtual Network Peering sitll exists:\n%#v", resp.Properties) + } + } + + return nil +} + +var testAccAzureRMVirtualNetworkPeering_basic = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_virtual_network" "test1" { + name = "acctestvirtnet-1-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.1.0/24"] + location = "${azurerm_resource_group.test.location}" +} + +resource "azurerm_virtual_network" "test2" { + name = "acctestvirtnet-2-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.2.0/24"] + location = "${azurerm_resource_group.test.location}" +} + +resource "azurerm_virtual_network_peering" "test1" { + name = "acctestpeer-1-%d" + resource_group_name = 
"${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test1.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test2.id}" + allow_virtual_network_access = true +} + +resource "azurerm_virtual_network_peering" "test2" { + name = "acctestpeer-2-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test2.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test1.id}" + allow_virtual_network_access = true +} +` + +var testAccAzureRMVirtualNetworkPeering_basicUpdate = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} + +resource "azurerm_virtual_network" "test1" { + name = "acctestvirtnet-1-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.1.0/24"] + location = "${azurerm_resource_group.test.location}" +} + +resource "azurerm_virtual_network" "test2" { + name = "acctestvirtnet-2-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.2.0/24"] + location = "${azurerm_resource_group.test.location}" +} + +resource "azurerm_virtual_network_peering" "test1" { + name = "acctestpeer-1-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test1.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test2.id}" + allow_forwarded_traffic = true + allow_virtual_network_access = true +} + +resource "azurerm_virtual_network_peering" "test2" { + name = "acctestpeer-2-%d" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test2.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test1.id}" + allow_forwarded_traffic = true + allow_virtual_network_access = true +} +` diff --git a/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown new file mode 100644 index 000000000..fd635c4ee --- /dev/null +++ b/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown @@ -0,0 +1,102 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azure_virtual_network_peering" +sidebar_current: "docs-azurerm-resource-network-virtual-network-peering" +description: |- + Creates a new virtual network peering which allows resources to access other + resources in the linked virtual network. +--- + +# azurerm\_virtual\_network\_peering + +Creates a new virtual network peering which allows resources to access other +resources in the linked virtual network. 
+ +## Example Usage + +``` +resource "azurerm_resource_group" "test" { + name = "peeredvnets-rg" + location = "West US" +} + +resource "azurerm_virtual_network" "test1" { + name = "peternetwork1" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.1.0/24"] + location = "West US" +} + +resource "azurerm_virtual_network" "test2" { + name = "peternetwork2" + resource_group_name = "${azurerm_resource_group.test.name}" + address_space = ["10.0.2.0/24"] + location = "West US" +} + +resource "azurerm_virtual_network_peering" "test1" { + name = "peer1to2" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test1.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test2.id}" +} + +resource "azurerm_virtual_network_peering" "test2" { + name = "peer2to1" + resource_group_name = "${azurerm_resource_group.test.name}" + virtual_network_name = "${azurerm_virtual_network.test2.name}" + remote_virtual_network_id = "${azurerm_virtual_network.test1.id}" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the virtual network peering. Changing this + forces a new resource to be created. + +* `virtual_network_name` - (Required) The name of the virtual network. Changing + this forces a new resource to be created. + +* `remote_virtual_network_id` - (Required) The full Azure resource ID of the + remote virtual network. Changing this forces a new resource to be created. + +* `resource_group_name` - (Required) The name of the resource group in which to + create the virtual network. Changing this forces a new resource to be + created. + +* `allow_virtual_network_access` - (Optional) Controls if the VMs in the remote + virtual network can access VMs in the local virtual network. Defaults to + false. + +* `allow_forwarded_traffic` - (Optional) Controls if forwarded traffic from VMs + in the remote virtual network is allowed. Defaults to false. + +* `allow_gateway_transit` - (Optional) Controls gatewayLinks can be used in the + remote virtual network’s link to the local virtual network. + +* `use_remote_gateways` - (Optional) Controls if remote gateways can be used on + the local virtual network. If the flag is set to true, and + allowGatewayTransit on the remote peering is also true, virtual network will + use gateways of remote virtual network for transit. Only one peering can + have this flag set to true. This flag cannot be set if virtual network + already has a gateway. Defaults to false. + +## Attributes Reference + +The following attributes are exported: + +* `id` - The Virtual Network Peering resource ID. + +## Note + +Virtual Network peerings cannot be created, updated or deleted concurrently. + +## Import + +Virtual Network Peerings can be imported using the `resource id`, e.g. 
+ +``` +terraform import azurerm_virtual_network_peering.testPeering /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/virtualNetworkPeerings/myvnet1peering +``` \ No newline at end of file diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb index df3bcea46..b617310bb 100644 --- a/website/source/layouts/azurerm.erb +++ b/website/source/layouts/azurerm.erb @@ -82,6 +82,10 @@ azurerm_virtual_network + > + azurerm_virtual_network_peering + + > azurerm_network_security_group From a5aef9041af6856cda74ab120dc6408769244cef Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sat, 13 Aug 2016 19:39:09 +0100 Subject: [PATCH 0701/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 55f4f4ede..af244ea56 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ FEATURES: * **New Resource:** `aws_load_balancer_backend_server_policy` [GH-7458] * **New Resource:** `aws_load_balancer_listener_policy` [GH-7458] * **New Resource:** `aws_lb_ssl_negotiation_policy` [GH-8084] + * **New Resource:** `azurerm_virtual_network_peering` [GH-8168] * **New Resource:** `google_compute_image` [GH-7960] * **New Data Source:** `aws_ip_ranges` [GH-7984] * **New Data Source:** `fastly_ip_ranges` [GH-7984] From 9c793b84617a5162e763565e819e2df031de05e9 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 13 Aug 2016 12:46:28 -0600 Subject: [PATCH 0702/1238] Update CHANGELOG.md --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index af244ea56..b57127d25 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -29,7 +29,8 @@ IMPROVEMENTS * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] - * provider/openstack: Support pdating the External Gateway assigned to a Neutron router [GH-8070] + * provider/openstack: Support updating the External Gateway assigned to a Neutron router [GH-8070] + * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` #8155 * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] * provider/consul: add tls config support to consul provider [GH-7015] From 44b6d7bbeee5527474c323d0b93c3ac2181abcf3 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sat, 13 Aug 2016 12:47:34 -0600 Subject: [PATCH 0703/1238] Update CHANGELOG.md --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b57127d25..4f010be6c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -30,7 +30,7 @@ IMPROVEMENTS * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] * provider/openstack: Support updating the External Gateway assigned to a Neutron router [GH-8070] - * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` #8155 + * provider/openstack: Support for `value_specs` param on `openstack_networking_network_v2` [GH-8155] * provider/vsphere: Improved SCSI controller handling in `vsphere_virtual_machine` [GH-7908] * provider/vsphere: 
Adding disk type of `Thick Lazy` to `vsphere_virtual_disk` and `vsphere_virtual_machine` [GH-7916] * provider/consul: add tls config support to consul provider [GH-7015] From 760e022e46bd5823f635e87a727017dcf2fb0663 Mon Sep 17 00:00:00 2001 From: Rafal Jeczalik Date: Sun, 14 Aug 2016 12:02:49 +0200 Subject: [PATCH 0704/1238] provider/azure: add custom_data argument for azure_instance resource (#8158) * provider/azure: add custom_data argument for azure_instance resource * website: update azure doc * provider/azure: fix whitespace in test templates --- .../azure/resource_azure_instance.go | 41 +++++++++++++++++ .../azure/resource_azure_instance_test.go | 44 +++++++++++-------- .../providers/azure/r/instance.html.markdown | 2 + 3 files changed, 68 insertions(+), 19 deletions(-) diff --git a/builtin/providers/azure/resource_azure_instance.go b/builtin/providers/azure/resource_azure_instance.go index 9bb74ffff..659c7a955 100644 --- a/builtin/providers/azure/resource_azure_instance.go +++ b/builtin/providers/azure/resource_azure_instance.go @@ -2,7 +2,9 @@ package azure import ( "bytes" + "crypto/sha1" "encoding/base64" + "encoding/hex" "fmt" "log" "strings" @@ -208,6 +210,19 @@ func resourceAzureInstance() *schema.Resource { Optional: true, ForceNew: true, }, + + "custom_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + StateFunc: func(v interface{}) string { + if s, ok := v.(string); ok && s != "" { + hash := sha1.Sum([]byte(s)) + return hex.EncodeToString(hash[:]) + } + return "" + }, + }, }, } } @@ -277,6 +292,18 @@ func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err return fmt.Errorf("Error configuring the deployment for %s: %s", name, err) } + var customData string + if data, ok := d.GetOk("custom_data"); ok { + data := data.(string) + + // Ensure the custom_data is not double-encoded. + if _, err := base64.StdEncoding.DecodeString(data); err != nil { + customData = base64.StdEncoding.EncodeToString([]byte(data)) + } else { + customData = data + } + } + if osType == linux { // This is pretty ugly, but the Azure SDK leaves me no other choice... 
if tp, ok := d.GetOk("ssh_key_thumbprint"); ok { @@ -298,6 +325,13 @@ func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err if err != nil { return fmt.Errorf("Error configuring %s for Linux: %s", name, err) } + + if customData != "" { + err = vmutils.ConfigureWithCustomDataForLinux(&role, customData) + if err != nil { + return fmt.Errorf("Error configuring custom data for %s: %s", name, err) + } + } } if osType == windows { @@ -325,6 +359,13 @@ func resourceAzureInstanceCreate(d *schema.ResourceData, meta interface{}) (err return fmt.Errorf("Error configuring %s for WindowsToJoinDomain: %s", name, err) } } + + if customData != "" { + err = vmutils.ConfigureWithCustomDataForWindows(&role, customData) + if err != nil { + return fmt.Errorf("Error configuring custom data for %s: %s", name, err) + } + } } if s := d.Get("endpoint").(*schema.Set); s.Len() > 0 { diff --git a/builtin/providers/azure/resource_azure_instance_test.go b/builtin/providers/azure/resource_azure_instance_test.go index d7270d991..95061d30f 100644 --- a/builtin/providers/azure/resource_azure_instance_test.go +++ b/builtin/providers/azure/resource_azure_instance_test.go @@ -38,6 +38,8 @@ func TestAccAzureInstance_basic(t *testing.T) { "azure_instance.foo", "location", "West US"), resource.TestCheckResourceAttr( "azure_instance.foo", "endpoint.2462817782.public_port", "22"), + resource.TestCheckResourceAttr( + "azure_instance.foo", "custom_data", "0ea0f28b0c42d6bef7d0c7ab4886324feaa8b5e1"), ), }, }, @@ -104,6 +106,8 @@ func TestAccAzureInstance_advanced(t *testing.T) { "azure_instance.foo", "security_group", "terraform-security-group1"), resource.TestCheckResourceAttr( "azure_instance.foo", "endpoint.1814039778.public_port", "3389"), + resource.TestCheckResourceAttr( + "azure_instance.foo", "custom_data", "04c589e0edaa5ffe185d1e5532e77d1b2ac4b948"), ), }, }, @@ -437,6 +441,7 @@ resource "azure_instance" "foo" { location = "West US" username = "terraform" password = "Pass!admin123" + custom_data = "# Hello world" endpoint { name = "SSH" @@ -448,9 +453,9 @@ resource "azure_instance" "foo" { var testAccAzureInstance_separateHostedService = ` resource "azure_hosted_service" "foo" { - name = "%s" - location = "West US" - ephemeral_contents = true + name = "%s" + location = "West US" + ephemeral_contents = true } resource "azure_instance" "foo" { @@ -475,16 +480,16 @@ var testAccAzureInstance_advanced = fmt.Sprintf(` resource "azure_virtual_network" "foo" { name = "terraform-vnet-advanced-test" address_space = ["10.1.2.0/24"] - location = "West US" + location = "West US" - subnet { + subnet { name = "subnet1" - address_prefix = "10.1.2.0/25" - } + address_prefix = "10.1.2.0/25" + } - subnet { + subnet { name = "subnet2" - address_prefix = "10.1.2.128/25" + address_prefix = "10.1.2.128/25" } } @@ -501,8 +506,8 @@ resource "azure_security_group_rule" "foo" { source_port_range = "*" destination_address_prefix = "*" destination_port_range = "3389" - action = "Deny" - type = "Inbound" + action = "Deny" + type = "Inbound" protocol = "TCP" } @@ -518,6 +523,7 @@ resource "azure_instance" "foo" { security_group = "${azure_security_group.foo.name}" username = "terraform" password = "Pass!admin123" + custom_data = "IyBIZWxsbyB3b3JsZA==" endpoint { name = "RDP" @@ -531,16 +537,16 @@ var testAccAzureInstance_update = fmt.Sprintf(` resource "azure_virtual_network" "foo" { name = "terraform-vnet-update-test" address_space = ["10.1.2.0/24"] - location = "West US" + location = "West US" subnet { name = "subnet1" - 
address_prefix = "10.1.2.0/25" - } + address_prefix = "10.1.2.0/25" + } subnet { name = "subnet2" - address_prefix = "10.1.2.128/25" + address_prefix = "10.1.2.128/25" } } @@ -557,8 +563,8 @@ resource "azure_security_group_rule" "foo" { source_port_range = "*" destination_address_prefix = "*" destination_port_range = "3389" - type = "Inbound" - action = "Deny" + type = "Inbound" + action = "Deny" protocol = "TCP" } @@ -575,8 +581,8 @@ resource "azure_security_group_rule" "bar" { source_port_range = "*" destination_address_prefix = "*" destination_port_range = "3389" - type = "Inbound" - action = "Deny" + type = "Inbound" + action = "Deny" protocol = "TCP" } diff --git a/website/source/docs/providers/azure/r/instance.html.markdown b/website/source/docs/providers/azure/r/instance.html.markdown index 38d1684e0..cbd76cb17 100644 --- a/website/source/docs/providers/azure/r/instance.html.markdown +++ b/website/source/docs/providers/azure/r/instance.html.markdown @@ -124,6 +124,8 @@ The following arguments are supported: * `domain_password` - (Optional) The password for the domain_username account specified above. +* `custom_data` - (Optional) The custom data to provide when launching the + instance. The `endpoint` block supports: From 598b940e1cc4e7466778361c106f891a67a2d992 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 14 Aug 2016 11:03:30 +0100 Subject: [PATCH 0705/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4f010be6c..76bd4e012 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ IMPROVEMENTS * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated [GH-7794] * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` [GH-8087] * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options [GH-7871] + * provider/azure: add custom_data argument for azure_instance resource [GH-8158] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] From ec42a98cb20e5151f345f928c9beccbc82dbee32 Mon Sep 17 00:00:00 2001 From: Dave Walter Date: Sun, 14 Aug 2016 03:14:42 -0700 Subject: [PATCH 0706/1238] Azure blob contents can be copied from an existing blob (#8126) - adds "source_uri" field - "source_uri" expects the URI to an existing blob that you have access to - it can be in a different storage account, or in the Azure File service - the docs have been updated to reflect the change Signed-off-by: Dan Wendorf --- .../azurerm/resource_arm_storage_blob.go | 66 +++++++++------ .../azurerm/resource_arm_storage_blob_test.go | 83 +++++++++++++++++++ .../azurerm/r/storage_blob.html.markdown | 10 ++- 3 files changed, 130 insertions(+), 29 deletions(-) diff --git a/builtin/providers/azurerm/resource_arm_storage_blob.go b/builtin/providers/azurerm/resource_arm_storage_blob.go index 67a3900b0..5fcf0f1c9 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob.go @@ -46,7 +46,7 @@ func resourceArmStorageBlob() *schema.Resource { }, "type": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validateArmStorageBlobType, }, @@ -58,9 +58,16 @@ func resourceArmStorageBlob() *schema.Resource { 
ValidateFunc: validateArmStorageBlobSize, }, "source": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source_uri"}, + }, + "source_uri": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"source"}, }, "url": { Type: schema.TypeString, @@ -144,34 +151,41 @@ func resourceArmStorageBlobCreate(d *schema.ResourceData, meta interface{}) erro name := d.Get("name").(string) blobType := d.Get("type").(string) cont := d.Get("storage_container_name").(string) + sourceUri := d.Get("source_uri").(string) log.Printf("[INFO] Creating blob %q in storage account %q", name, storageAccountName) - switch strings.ToLower(blobType) { - case "block": - if err := blobClient.CreateBlockBlob(cont, name); err != nil { + if sourceUri != "" { + if err := blobClient.CopyBlob(cont, name, sourceUri); err != nil { return fmt.Errorf("Error creating storage blob on Azure: %s", err) } + } else { + switch strings.ToLower(blobType) { + case "block": + if err := blobClient.CreateBlockBlob(cont, name); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } - source := d.Get("source").(string) - if source != "" { - parallelism := d.Get("parallelism").(int) - attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) + source := d.Get("source").(string) + if source != "" { + parallelism := d.Get("parallelism").(int) + attempts := d.Get("attempts").(int) + if err := resourceArmStorageBlobBlockUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } } - } - case "page": - source := d.Get("source").(string) - if source != "" { - parallelism := d.Get("parallelism").(int) - attempts := d.Get("attempts").(int) - if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) - } - } else { - size := int64(d.Get("size").(int)) - if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil { - return fmt.Errorf("Error creating storage blob on Azure: %s", err) + case "page": + source := d.Get("source").(string) + if source != "" { + parallelism := d.Get("parallelism").(int) + attempts := d.Get("attempts").(int) + if err := resourceArmStorageBlobPageUploadFromSource(cont, name, source, blobClient, parallelism, attempts); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } + } else { + size := int64(d.Get("size").(int)) + if err := blobClient.PutPageBlob(cont, name, size, map[string]string{}); err != nil { + return fmt.Errorf("Error creating storage blob on Azure: %s", err) + } } } } diff --git a/builtin/providers/azurerm/resource_arm_storage_blob_test.go b/builtin/providers/azurerm/resource_arm_storage_blob_test.go index ecf768fe7..4a5794fb0 100644 --- a/builtin/providers/azurerm/resource_arm_storage_blob_test.go +++ b/builtin/providers/azurerm/resource_arm_storage_blob_test.go @@ -257,6 +257,41 @@ func TestAccAzureRMStorageBlobPage_source(t *testing.T) { }) } +func TestAccAzureRMStorageBlob_source_uri(t *testing.T) { + ri := acctest.RandInt() + rs1 := strings.ToLower(acctest.RandString(11)) + sourceBlob, err := 
ioutil.TempFile("", "") + if err != nil { + t.Fatalf("Failed to create local source blob file") + } + + _, err = io.CopyN(sourceBlob, rand.Reader, 25*1024*1024) + if err != nil { + t.Fatalf("Failed to write random test to source blob") + } + + err = sourceBlob.Close() + if err != nil { + t.Fatalf("Failed to close source blob") + } + + config := fmt.Sprintf(testAccAzureRMStorageBlob_source_uri, ri, rs1, sourceBlob.Name()) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMStorageBlobDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMStorageBlobMatchesFile("azurerm_storage_blob.destination", storage.BlobTypeBlock, sourceBlob.Name()), + ), + }, + }, + }) +} + func testCheckAzureRMStorageBlobExists(name string) resource.TestCheckFunc { return func(s *terraform.State) error { @@ -500,3 +535,51 @@ resource "azurerm_storage_blob" "source" { attempts = 3 } ` + +var testAccAzureRMStorageBlob_source_uri = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "westus" +} + +resource "azurerm_storage_account" "source" { + name = "acctestacc%s" + resource_group_name = "${azurerm_resource_group.test.name}" + location = "westus" + account_type = "Standard_LRS" + + tags { + environment = "staging" + } +} + +resource "azurerm_storage_container" "source" { + name = "source" + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + container_access_type = "blob" +} + +resource "azurerm_storage_blob" "source" { + name = "source.vhd" + + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" + + type = "block" + source = "%s" + parallelism = 4 + attempts = 2 +} + +resource "azurerm_storage_blob" "destination" { + name = "destination.vhd" + + resource_group_name = "${azurerm_resource_group.test.name}" + storage_account_name = "${azurerm_storage_account.source.name}" + storage_container_name = "${azurerm_storage_container.source.name}" + + source_uri = "${azurerm_storage_blob.source.url}" +} +` diff --git a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown index 9c8a3e7ea..4cac70e26 100644 --- a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown +++ b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown @@ -58,11 +58,15 @@ The following arguments are supported: * `storage_container_name` - (Required) The name of the storage container in which this blob should be created. -* `type` - (Required) The type of the storage blob to be created. One of either `block` or `page`. +* `type` - (Optional) The type of the storage blob to be created. One of either `block` or `page`. When not copying from an existing blob, + this becomes required. * `size` - (Optional) Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0. - -* `source` - (Optional) An absolute path to a file on the local system + +* `source` - (Optional) An absolute path to a file on the local system. Cannot be defined if `source_uri` is defined. 
+ +* `source_uri` - (Optional) The URI of an existing blob, or a file in the Azure File service, to use as the source contents + for the blob to be created. Changing this forces a new resource to be created. Cannot be defined if `source` is defined. * `parallelism` - (Optional) The number of workers per CPU core to run for concurrent uploads. Defaults to `8`. From d3a4714efbf584ba8246c79db040b9ade9365195 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 14 Aug 2016 11:15:25 +0100 Subject: [PATCH 0707/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 76bd4e012..4a3b02ba2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ IMPROVEMENTS * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options [GH-7871] * provider/azure: add custom_data argument for azure_instance resource [GH-8158] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] + * provider/azurerm: Storage blob contents can be copied from an existing blob [GH-8126] * provider/google: allows atomic Cloud DNS record changes [GH-6575] * provider/google: Move URLMap hosts to TypeSet from TypeList [GH-7472] * provider/google: Support static private IP addresses in `resource_compute_instance` [GH-6310] From 8f976e5ceb3a229a9602fed93b536635bc661086 Mon Sep 17 00:00:00 2001 From: Brian Fallik Date: Sun, 14 Aug 2016 11:38:00 -0400 Subject: [PATCH 0708/1238] fix small typo (#8175) I'm pretty sure "with" was intended here. --- website/source/upgrade-guides/0-7.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/source/upgrade-guides/0-7.html.markdown b/website/source/upgrade-guides/0-7.html.markdown index a29ac6621..91b5b7834 100644 --- a/website/source/upgrade-guides/0-7.html.markdown +++ b/website/source/upgrade-guides/0-7.html.markdown @@ -89,7 +89,7 @@ This allows double quote characters to be expressed properly within strings insi ## Safer `terraform plan` Behavior -Prior to v0.7, the `terraform plan` command had the potential to write updates to the state if changes were detected during the Refresh step (which happens by default during `plan`). Some configurations have metadata that changes which every read, so Refresh would always result in changes to the state, and therefore a write. +Prior to v0.7, the `terraform plan` command had the potential to write updates to the state if changes were detected during the Refresh step (which happens by default during `plan`). Some configurations have metadata that changes with every read, so Refresh would always result in changes to the state, and therefore a write. In collaborative enviroments with shared remote state, this potential side effect of `plan` would cause unnecessary contention over the state, and potentially even interfere with active `apply` operations if they were happening simultaneously elsewhere. 
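For the scenario described above, the implicit refresh can also be skipped explicitly so that `plan` never touches the state; a minimal sketch, assuming only the standard `-refresh` flag of `terraform plan`:

    # Plan against the state as-is, with no refresh step and therefore no state writes
    terraform plan -refresh=false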
From 698000eb84c5adca346932cbc8c21a758c86a16f Mon Sep 17 00:00:00 2001 From: "Gruendler, Daniel (415)" Date: Sun, 14 Aug 2016 20:56:53 +0200 Subject: [PATCH 0709/1238] Fix typo in OpenStack LBaaSv2 pool resource --- builtin/providers/openstack/resource_openstack_lb_pool_v2.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v2.go b/builtin/providers/openstack/resource_openstack_lb_pool_v2.go index aeb59caaf..92ef3e486 100644 --- a/builtin/providers/openstack/resource_openstack_lb_pool_v2.go +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v2.go @@ -50,7 +50,7 @@ func resourcePoolV2() *schema.Resource { ForceNew: true, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "lTCP" && value != "HTTP" && value != "HTTPS" { + if value != "TCP" && value != "HTTP" && value != "HTTPS" { errors = append(errors, fmt.Errorf( "Only 'TCP', 'HTTP', and 'HTTPS' are supported values for 'protocol'")) } From 7d6a2bef8a70e468831c29375a8fcdff87de7a56 Mon Sep 17 00:00:00 2001 From: Joe Topjian Date: Sun, 14 Aug 2016 13:08:44 -0600 Subject: [PATCH 0710/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a3b02ba2..ffb132c22 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -58,6 +58,7 @@ BUG FIXES: * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` [GH-8096] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] + * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource [GH-8179] ## 0.7.0 (August 2, 2016) From f71646a0f4f6937d571292ad38fd833f4815e2a9 Mon Sep 17 00:00:00 2001 From: Tommy Murphy Date: Sun, 14 Aug 2016 16:38:37 -0400 Subject: [PATCH 0711/1238] provider/digitalocean: trim whitespace from ssh key (#8173) --- .../digitalocean/resource_digitalocean_ssh_key.go | 4 ++++ .../digitalocean/resource_digitalocean_ssh_key_test.go | 7 +++---- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go index 1495cb67d..db57085b6 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go +++ b/builtin/providers/digitalocean/resource_digitalocean_ssh_key.go @@ -4,6 +4,7 @@ import ( "fmt" "log" "strconv" + "strings" "github.com/digitalocean/godo" "github.com/hashicorp/terraform/helper/schema" @@ -34,6 +35,9 @@ func resourceDigitalOceanSSHKey() *schema.Resource { Type: schema.TypeString, Required: true, ForceNew: true, + StateFunc: func(val interface{}) string { + return strings.TrimSpace(val.(string)) + }, }, "fingerprint": &schema.Schema{ diff --git a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go b/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go index e14833fe9..ed64cf0f4 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_ssh_key_test.go @@ -27,7 +27,7 @@ func TestAccDigitalOceanSSHKey_Basic(t *testing.T) { resource.TestCheckResourceAttr( "digitalocean_ssh_key.foobar", "name", "foobar"), resource.TestCheckResourceAttr( - "digitalocean_ssh_key.foobar", "public_key", testAccValidPublicKey), + "digitalocean_ssh_key.foobar", "public_key", strings.TrimSpace(testAccValidPublicKey)), ), }, }, @@ -111,6 
+111,5 @@ resource "digitalocean_ssh_key" "foobar" { public_key = "%s" }`, testAccValidPublicKey) -var testAccValidPublicKey = strings.TrimSpace(` -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR -`) +var testAccValidPublicKey = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCKVmnMOlHKcZK8tpt3MP1lqOLAcqcJzhsvJcjscgVERRN7/9484SOBJ3HSKxxNG5JN8owAjy5f9yYwcUg+JaUVuytn5Pv3aeYROHGGg+5G346xaq3DAwX6Y5ykr2fvjObgncQBnuU5KHWCECO/4h8uWuwh/kfniXPVjFToc+gnkqA+3RKpAecZhFXwfalQ9mMuYGFxn+fwn8cYEApsJbsEmb0iJwPiZ5hjFC8wREuiTlhPHDgkBLOiycd20op2nXzDbHfCHInquEe/gYxEitALONxm0swBOwJZwlTDOB7C6y2dzlrtxr1L59m7pCkWI4EtTRLvleehBoj3u7jB4usR +` From b1a778ee233eadf2e16e61e1d5d672305ee917a0 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 14 Aug 2016 21:40:14 +0100 Subject: [PATCH 0712/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ffb132c22..2ebbc024b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ BUG FIXES: * provider/aws: Fix line ending errors/diffs with IAM Server Certs [GH-8074] * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs [GH-6956] * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` [GH-8096] + * provider/digitalocean: trim whitespace from ssh key [GH-8173] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource [GH-8179] From bd22a4f65a6a37ee371cbeedf01415a8e7c4514f Mon Sep 17 00:00:00 2001 From: Alexander Zhukau Date: Sun, 14 Aug 2016 16:57:44 -0400 Subject: [PATCH 0713/1238] provider/aws: allow numeric characters in RedshiftClusterDbName (#8178) --- builtin/providers/aws/resource_aws_redshift_cluster.go | 4 ++-- builtin/providers/aws/resource_aws_redshift_cluster_test.go | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/builtin/providers/aws/resource_aws_redshift_cluster.go b/builtin/providers/aws/resource_aws_redshift_cluster.go index af6eda093..3273b639b 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster.go @@ -750,9 +750,9 @@ func validateRedshiftClusterIdentifier(v interface{}, k string) (ws []string, er func validateRedshiftClusterDbName(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if !regexp.MustCompile(`^[a-z]+$`).MatchString(value) { + if !regexp.MustCompile(`^[0-9a-z]+$`).MatchString(value) { errors = append(errors, fmt.Errorf( - "only lowercase letters characters allowed in %q", k)) + "only lowercase letters and numeric characters allowed in %q", k)) } if len(value) > 64 { errors = append(errors, fmt.Errorf( diff --git a/builtin/providers/aws/resource_aws_redshift_cluster_test.go b/builtin/providers/aws/resource_aws_redshift_cluster_test.go index 4be682136..903278c88 100644 --- a/builtin/providers/aws/resource_aws_redshift_cluster_test.go +++ b/builtin/providers/aws/resource_aws_redshift_cluster_test.go @@ -319,7 +319,7 @@ func TestResourceAWSRedshiftClusterDbNameValidation(t *testing.T) { }, { Value: "testing1", - ErrCount: 1, + ErrCount: 
0, }, { Value: "testing-", From b02dacfb7e14c47c0db2f5ba34f7563a50323dfd Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Sun, 14 Aug 2016 21:59:47 +0100 Subject: [PATCH 0714/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2ebbc024b..ddba47e78 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -56,6 +56,7 @@ BUG FIXES: * provider/aws: Fix line ending errors/diffs with IAM Server Certs [GH-8074] * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs [GH-6956] * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` [GH-8096] + * provider/aws: allow numeric characters in RedshiftClusterDbName [GH-8178] * provider/digitalocean: trim whitespace from ssh key [GH-8173] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From e9438514296662434870a22499457f6c7642af40 Mon Sep 17 00:00:00 2001 From: Krzysztof Wilczynski Date: Mon, 15 Aug 2016 15:30:47 +0900 Subject: [PATCH 0715/1238] Add ability to set Storage Class in aws_s3_bucket_object. (#8174) An S3 Bucket owner may wish to select a different underlying storage class for an object. This commit adds an optional "storage_class" attribute to the aws_s3_bucket_object resource so that the owner of the S3 bucket can specify an appropriate storage class to use when creating an object. Signed-off-by: Krzysztof Wilczynski --- .../aws/data_source_aws_s3_bucket_object.go | 8 +- ... data_source_aws_s3_bucket_object_test.go} | 4 +- .../aws/resource_aws_s3_bucket_object.go | 46 +++++- .../aws/resource_aws_s3_bucket_object_test.go | 134 ++++++++++++++++-- .../aws/r/s3_bucket_object.html.markdown | 2 + 5 files changed, 177 insertions(+), 17 deletions(-) rename builtin/providers/aws/{data_source_aws_s3_object_test.go => data_source_aws_s3_bucket_object_test.go} (99%) diff --git a/builtin/providers/aws/data_source_aws_s3_bucket_object.go b/builtin/providers/aws/data_source_aws_s3_bucket_object.go index d5fbbd31c..b9fb74db0 100644 --- a/builtin/providers/aws/data_source_aws_s3_bucket_object.go +++ b/builtin/providers/aws/data_source_aws_s3_bucket_object.go @@ -155,10 +155,16 @@ func dataSourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) e d.Set("metadata", pointersMapToStringList(out.Metadata)) d.Set("server_side_encryption", out.ServerSideEncryption) d.Set("sse_kms_key_id", out.SSEKMSKeyId) - d.Set("storage_class", out.StorageClass) d.Set("version_id", out.VersionId) d.Set("website_redirect_location", out.WebsiteRedirectLocation) + // The "STANDARD" (which is also the default) storage + // class when set would not be included in the results. 
+ d.Set("storage_class", s3.StorageClassStandard) + if out.StorageClass != nil { + d.Set("storage_class", out.StorageClass) + } + if isContentTypeAllowed(out.ContentType) { input := s3.GetObjectInput{ Bucket: aws.String(bucket), diff --git a/builtin/providers/aws/data_source_aws_s3_object_test.go b/builtin/providers/aws/data_source_aws_s3_bucket_object_test.go similarity index 99% rename from builtin/providers/aws/data_source_aws_s3_object_test.go rename to builtin/providers/aws/data_source_aws_s3_bucket_object_test.go index f9210437d..7c7c7e922 100644 --- a/builtin/providers/aws/data_source_aws_s3_object_test.go +++ b/builtin/providers/aws/data_source_aws_s3_bucket_object_test.go @@ -154,12 +154,12 @@ func TestAccDataSourceAWSS3BucketObject_allParams(t *testing.T) { resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "server_side_encryption", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "sse_kms_key_id", ""), // Supported, but difficult to reproduce in short testing time - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "storage_class", ""), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "storage_class", "STANDARD"), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expiration", ""), // Currently unsupported in aws_s3_bucket_object resource resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "expires", ""), resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "website_redirect_location", ""), - resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.#", "0"), + resource.TestCheckResourceAttr("data.aws_s3_bucket_object.obj", "metadata.%", "0"), ), }, }, diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object.go b/builtin/providers/aws/resource_aws_s3_bucket_object.go index 2df9d5da0..67548fa73 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object.go @@ -82,6 +82,13 @@ func resourceAwsS3BucketObject() *schema.Resource { ConflictsWith: []string{"source"}, }, + "storage_class": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validateS3BucketObjectStorageClassType, + }, + "kms_key_id": &schema.Schema{ Type: schema.TypeString, Optional: true, @@ -107,9 +114,6 @@ func resourceAwsS3BucketObject() *schema.Resource { func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) error { s3conn := meta.(*AWSClient).s3conn - bucket := d.Get("bucket").(string) - key := d.Get("key").(string) - acl := d.Get("acl").(string) var body io.ReadSeeker if v, ok := d.GetOk("source"); ok { @@ -137,13 +141,20 @@ func resourceAwsS3BucketObjectPut(d *schema.ResourceData, meta interface{}) erro } } + bucket := d.Get("bucket").(string) + key := d.Get("key").(string) + putInput := &s3.PutObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), - ACL: aws.String(acl), + ACL: aws.String(d.Get("acl").(string)), Body: body, } + if v, ok := d.GetOk("storage_class"); ok { + putInput.StorageClass = aws.String(v.(string)) + } + if v, ok := d.GetOk("cache_control"); ok { putInput.CacheControl = aws.String(v.(string)) } @@ -205,6 +216,7 @@ func resourceAwsS3BucketObjectRead(d *schema.ResourceData, meta interface{}) err } return err } + log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp) d.Set("cache_control", resp.CacheControl) d.Set("content_disposition", resp.ContentDisposition) @@ -214,7 +226,13 @@ func resourceAwsS3BucketObjectRead(d 
*schema.ResourceData, meta interface{}) err d.Set("version_id", resp.VersionId) d.Set("kms_key_id", resp.SSEKMSKeyId) - log.Printf("[DEBUG] Reading S3 Bucket Object meta: %s", resp) + // The "STANDARD" (which is also the default) storage + // class when set would not be included in the results. + d.Set("storage_class", s3.StorageClassStandard) + if resp.StorageClass != nil { + d.Set("storage_class", resp.StorageClass) + } + return nil } @@ -297,3 +315,21 @@ func validateS3BucketObjectAclType(v interface{}, k string) (ws []string, errors } return } + +func validateS3BucketObjectStorageClassType(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + + storageClass := map[string]bool{ + s3.StorageClassStandard: true, + s3.StorageClassReducedRedundancy: true, + s3.StorageClassStandardIa: true, + } + + if _, ok := storageClass[value]; !ok { + errors = append(errors, fmt.Errorf( + "%q contains an invalid Storage Class type %q. Valid types are either %q, %q, or %q", + k, value, s3.StorageClassStandard, s3.StorageClassReducedRedundancy, + s3.StorageClassStandardIa)) + } + return +} diff --git a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go index d88b3c99d..824c5ba35 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket_object_test.go +++ b/builtin/providers/aws/resource_aws_s3_bucket_object_test.go @@ -309,6 +309,34 @@ func TestAccAWSS3BucketObject_acl(t *testing.T) { }) } +func testAccCheckAWSS3BucketObjectAcl(n string, expectedPerms []string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, _ := s.RootModule().Resources[n] + s3conn := testAccProvider.Meta().(*AWSClient).s3conn + + out, err := s3conn.GetObjectAcl(&s3.GetObjectAclInput{ + Bucket: aws.String(rs.Primary.Attributes["bucket"]), + Key: aws.String(rs.Primary.Attributes["key"]), + }) + + if err != nil { + return fmt.Errorf("GetObjectAcl error: %v", err) + } + + var perms []string + for _, v := range out.Grants { + perms = append(perms, *v.Permission) + } + sort.Strings(perms) + + if !reflect.DeepEqual(perms, expectedPerms) { + return fmt.Errorf("Expected ACL permissions to be %v, got %v", expectedPerms, perms) + } + + return nil + } +} + func TestResourceAWSS3BucketObjectAcl_validation(t *testing.T) { _, errors := validateS3BucketObjectAclType("incorrect", "acl") if len(errors) == 0 { @@ -337,28 +365,102 @@ func TestResourceAWSS3BucketObjectAcl_validation(t *testing.T) { } } -func testAccCheckAWSS3BucketObjectAcl(n string, expectedPerms []string) resource.TestCheckFunc { +func TestAccAWSS3BucketObject_storageClass(t *testing.T) { + rInt := acctest.RandInt() + var obj s3.GetObjectOutput + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSS3BucketObjectDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + PreConfig: func() {}, + Config: testAccAWSS3BucketObjectConfigContent(rInt), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists( + "aws_s3_bucket_object.object", + &obj), + resource.TestCheckResourceAttr( + "aws_s3_bucket_object.object", + "storage_class", + "STANDARD"), + testAccCheckAWSS3BucketObjectStorageClass( + "aws_s3_bucket_object.object", + "STANDARD"), + ), + }, + resource.TestStep{ + Config: testAccAWSS3BucketObjectConfig_storageClass(rInt, "REDUCED_REDUNDANCY"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSS3BucketObjectExists( + "aws_s3_bucket_object.object", + 
&obj), + resource.TestCheckResourceAttr( + "aws_s3_bucket_object.object", + "storage_class", + "REDUCED_REDUNDANCY"), + testAccCheckAWSS3BucketObjectStorageClass( + "aws_s3_bucket_object.object", + "REDUCED_REDUNDANCY"), + ), + }, + }, + }) +} + +func TestResourceAWSS3BucketObjectStorageClass_validation(t *testing.T) { + _, errors := validateS3BucketObjectStorageClassType("incorrect", "storage_class") + if len(errors) == 0 { + t.Fatalf("Expected to trigger a validation error") + } + + var testCases = []struct { + Value string + ErrCount int + }{ + { + Value: "STANDARD", + ErrCount: 0, + }, + { + Value: "REDUCED_REDUNDANCY", + ErrCount: 0, + }, + } + + for _, tc := range testCases { + _, errors := validateS3BucketObjectStorageClassType(tc.Value, "storage_class") + if len(errors) != tc.ErrCount { + t.Fatalf("Expected not to trigger a validation error") + } + } +} + +func testAccCheckAWSS3BucketObjectStorageClass(n, expectedClass string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, _ := s.RootModule().Resources[n] s3conn := testAccProvider.Meta().(*AWSClient).s3conn - out, err := s3conn.GetObjectAcl(&s3.GetObjectAclInput{ + out, err := s3conn.HeadObject(&s3.HeadObjectInput{ Bucket: aws.String(rs.Primary.Attributes["bucket"]), Key: aws.String(rs.Primary.Attributes["key"]), }) if err != nil { - return fmt.Errorf("GetObjectAcl error: %v", err) + return fmt.Errorf("HeadObject error: %v", err) } - var perms []string - for _, v := range out.Grants { - perms = append(perms, *v.Permission) + // The "STANDARD" (which is also the default) storage + // class when set would not be included in the results. + storageClass := s3.StorageClassStandard + if out.StorageClass != nil { + storageClass = *out.StorageClass } - sort.Strings(perms) - if !reflect.DeepEqual(perms, expectedPerms) { - return fmt.Errorf("Expected ACL permissions to be %v, got %v", expectedPerms, perms) + if storageClass != expectedClass { + return fmt.Errorf("Expected Storage Class to be %v, got %v", + expectedClass, storageClass) } return nil @@ -472,3 +574,17 @@ resource "aws_s3_bucket_object" "object" { } `, randInt, acl) } + +func testAccAWSS3BucketObjectConfig_storageClass(randInt int, storage_class string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "object_bucket" { + bucket = "tf-object-test-bucket-%d" +} +resource "aws_s3_bucket_object" "object" { + bucket = "${aws_s3_bucket.object_bucket.bucket}" + key = "test-key" + content = "some_bucket_content" + storage_class = "%s" +} +`, randInt, storage_class) +} diff --git a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown index fc7f95b53..40f806669 100644 --- a/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown +++ b/website/source/docs/providers/aws/r/s3_bucket_object.html.markdown @@ -58,6 +58,8 @@ The following arguments are supported: * `content_encoding` - (Optional) Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information. * `content_language` - (Optional) The language the content is in e.g. en-US or en-GB. * `content_type` - (Optional) A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input. 
+* `storage_class` - (Optional) Specifies the desired [Storage Class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html) +for the object. Can be either "`STANDARD`", "`REDUCED_REDUNDANCY`", or "`STANDARD_IA`". Defaults to "`STANDARD`". * `etag` - (Optional) Used to trigger updates. The only meaningful value is `${md5(file("path/to/file"))}`. This attribute is not compatible with `kms_key_id` * `kms_key_id` - (Optional) Specifies the AWS KMS Key ID to use for object encryption. From bbe42f07247f5025ab5472a61186c260280dfe90 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 07:31:34 +0100 Subject: [PATCH 0716/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index ddba47e78..48884037d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,6 +25,7 @@ IMPROVEMENTS * provider/aws: API gateway request/response parameters can now be specified as map, original `*_in_json` parameters deprecated [GH-7794] * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` [GH-8087] * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options [GH-7871] + * provider/aws: Add ability to set Storage Class in `aws_s3_bucket_object` [GH-8174] * provider/azure: add custom_data argument for azure_instance resource [GH-8158] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/azurerm: Storage blob contents can be copied from an existing blob [GH-8126] From ee56f1d075b41f4a39bdc9cf86f4c88d5be9b729 Mon Sep 17 00:00:00 2001 From: Gavin Williams Date: Fri, 12 Aug 2016 18:13:20 +0100 Subject: [PATCH 0717/1238] provieder/openstack: Add 'value_specs' support for openstack_networking_subnet_v2 provider. Updated provider documentation to support. --- ...resource_openstack_networking_subnet_v2.go | 92 ++++++++++++++++++- .../r/networking_subnet_v2.html.markdown | 2 + 2 files changed, 93 insertions(+), 1 deletion(-) diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go index 2ef42c78c..140b29f02 100644 --- a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go +++ b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go @@ -114,10 +114,91 @@ func resourceNetworkingSubnetV2() *schema.Resource { }, }, }, + "value_specs": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: true, + }, }, } } +// SubnetCreateOpts represents the attributes used when creating a new subnet. +type SubnetCreateOpts struct { + // Required + NetworkID string + CIDR string + // Optional + Name string + TenantID string + AllocationPools []subnets.AllocationPool + GatewayIP string + NoGateway bool + IPVersion int + EnableDHCP *bool + DNSNameservers []string + HostRoutes []subnets.HostRoute + ValueSpecs map[string]string +} + +// ToSubnetCreateMap casts a CreateOpts struct to a map. 
+func (opts SubnetCreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) { + s := make(map[string]interface{}) + + if opts.NetworkID == "" { + return nil, fmt.Errorf("A network ID is required") + } + if opts.CIDR == "" { + return nil, fmt.Errorf("A valid CIDR is required") + } + if opts.IPVersion != 0 && opts.IPVersion != subnets.IPv4 && opts.IPVersion != subnets.IPv6 { + return nil, fmt.Errorf("An IP type must either be 4 or 6") + } + + // Both GatewayIP and NoGateway should not be set + if opts.GatewayIP != "" && opts.NoGateway { + return nil, fmt.Errorf("Both disabling the gateway and specifying a gateway is not allowed") + } + + s["network_id"] = opts.NetworkID + s["cidr"] = opts.CIDR + + if opts.EnableDHCP != nil { + s["enable_dhcp"] = &opts.EnableDHCP + } + if opts.Name != "" { + s["name"] = opts.Name + } + if opts.GatewayIP != "" { + s["gateway_ip"] = opts.GatewayIP + } else if opts.NoGateway { + s["gateway_ip"] = nil + } + if opts.TenantID != "" { + s["tenant_id"] = opts.TenantID + } + if opts.IPVersion != 0 { + s["ip_version"] = opts.IPVersion + } + if len(opts.AllocationPools) != 0 { + s["allocation_pools"] = opts.AllocationPools + } + if len(opts.DNSNameservers) != 0 { + s["dns_nameservers"] = opts.DNSNameservers + } + if len(opts.HostRoutes) != 0 { + s["host_routes"] = opts.HostRoutes + } + + if opts.ValueSpecs != nil { + for k, v := range opts.ValueSpecs { + s[k] = v + } + } + + return map[string]interface{}{"subnet": s}, nil +} + func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) networkingClient, err := config.networkingV2Client(d.Get("region").(string)) @@ -133,7 +214,7 @@ func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) enableDHCP := d.Get("enable_dhcp").(bool) - createOpts := subnets.CreateOpts{ + createOpts := SubnetCreateOpts{ NetworkID: d.Get("network_id").(string), CIDR: d.Get("cidr").(string), Name: d.Get("name").(string), @@ -145,6 +226,7 @@ func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) DNSNameservers: resourceSubnetDNSNameserversV2(d), HostRoutes: resourceSubnetHostRoutesV2(d), EnableDHCP: &enableDHCP, + ValueSpecs: subnetValueSpecs(d), } log.Printf("[DEBUG] Create Options: %#v", createOpts) @@ -354,3 +436,11 @@ func waitForSubnetDelete(networkingClient *gophercloud.ServiceClient, subnetId s return s, "ACTIVE", nil } } + +func subnetValueSpecs(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("value_specs").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown index 072c49b16..ecc5f6b99 100644 --- a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown +++ b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown @@ -73,6 +73,8 @@ The following arguments are supported: object structure is documented below. Changing this updates the host routes for the existing subnet. +* `value_specs` - (Optional) Map of additional options. + The `allocation_pools` block supports: * `start` - (Required) The starting address. 
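For illustration, a minimal configuration exercising the new `value_specs` argument might look like the sketch below. The network and subnet names, the CIDR, and the `custom_option` key are placeholders invented for this example — `value_specs` simply forwards arbitrary key/value pairs to the Neutron API alongside the documented arguments.

```hcl
resource "openstack_networking_network_v2" "example" {
  name           = "example-network"
  admin_state_up = "true"
}

resource "openstack_networking_subnet_v2" "example" {
  name       = "example-subnet"
  network_id = "${openstack_networking_network_v2.example.id}"
  cidr       = "192.168.199.0/24"
  ip_version = 4

  # Extra options passed through to the Neutron API unchanged.
  # The key below is a placeholder, not a documented Neutron option.
  value_specs = {
    "custom_option" = "custom_value"
  }
}
```

Because `ToSubnetCreateMap` merges these pairs into the same request map as the first-class arguments, any option that already has a dedicated argument should be set via that argument rather than through `value_specs`.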
From ffd04882e7072d93ab194a306df6f2a0421d0125 Mon Sep 17 00:00:00 2001 From: stack72 Date: Mon, 15 Aug 2016 08:38:43 +0100 Subject: [PATCH 0718/1238] provider/packet: Restructure the Packet Volume test to no longer rely on environment variables --- .../packet/resource_packet_volume_test.go | 29 ++++++------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/builtin/providers/packet/resource_packet_volume_test.go b/builtin/providers/packet/resource_packet_volume_test.go index cffd55f13..cc487d897 100644 --- a/builtin/providers/packet/resource_packet_volume_test.go +++ b/builtin/providers/packet/resource_packet_volume_test.go @@ -2,9 +2,9 @@ package packet import ( "fmt" - "os" "testing" + "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "github.com/packethost/packngo" @@ -13,20 +13,17 @@ import ( func TestAccPacketVolume_Basic(t *testing.T) { var volume packngo.Volume - project_id := os.Getenv("PACKET_PROJECT_ID") - facility := os.Getenv("PACKET_FACILITY") + rs := acctest.RandString(10) resource.Test(t, resource.TestCase{ - PreCheck: testAccPacketVolumePreCheck(t), + PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckPacketVolumeDestroy, Steps: []resource.TestStep{ resource.TestStep{ - Config: fmt.Sprintf(testAccCheckPacketVolumeConfig_basic, project_id, facility), + Config: fmt.Sprintf(testAccCheckPacketVolumeConfig_basic, rs), Check: resource.ComposeTestCheckFunc( testAccCheckPacketVolumeExists("packet_volume.foobar", &volume), - resource.TestCheckResourceAttr( - "packet_volume.foobar", "project_id", project_id), resource.TestCheckResourceAttr( "packet_volume.foobar", "plan", "storage_1"), resource.TestCheckResourceAttr( @@ -80,24 +77,16 @@ func testAccCheckPacketVolumeExists(n string, volume *packngo.Volume) resource.T } } -func testAccPacketVolumePreCheck(t *testing.T) func() { - return func() { - testAccPreCheck(t) - if os.Getenv("PACKET_PROJECT_ID") == "" { - t.Fatal("PACKET_PROJECT_ID must be set") - } - if os.Getenv("PACKET_FACILITY") == "" { - t.Fatal("PACKET_FACILITY must be set") - } - } +const testAccCheckPacketVolumeConfig_basic = ` +resource "packet_project" "foobar" { + name = "%s" } -const testAccCheckPacketVolumeConfig_basic = ` resource "packet_volume" "foobar" { plan = "storage_1" billing_cycle = "hourly" size = 100 - project_id = "%s" - facility = "%s" + project_id = "${packet_project.foobar.id}" + facility = "ewr1" snapshot_policies = { snapshot_frequency = "1day", snapshot_count = 7 } }` From 3ff1321ef7e0d96630062501458a4d588702b48a Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 08:54:31 +0100 Subject: [PATCH 0719/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48884037d..2466482bb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,7 @@ FEATURES: * **New Resource:** `aws_lb_ssl_negotiation_policy` [GH-8084] * **New Resource:** `azurerm_virtual_network_peering` [GH-8168] * **New Resource:** `google_compute_image` [GH-7960] + * **New Resource:** `packet_volume` [GH-8142] * **New Data Source:** `aws_ip_ranges` [GH-7984] * **New Data Source:** `fastly_ip_ranges` [GH-7984] From fe5d7d1c63a3dbd4608d76c15c6e0de02b50a404 Mon Sep 17 00:00:00 2001 From: Noah Webb Date: Wed, 3 Aug 2016 16:58:35 -0400 Subject: [PATCH 0720/1238] provider/google: Support Import of 'google_compute_instance_template' --- 
.../import_compute_instance_template_test.go | 114 ++++++++++++++++ .../resource_compute_instance_template.go | 129 +++++++++++++++++- .../r/compute_instance_template.html.markdown | 3 +- 3 files changed, 244 insertions(+), 2 deletions(-) create mode 100644 builtin/providers/google/import_compute_instance_template_test.go diff --git a/builtin/providers/google/import_compute_instance_template_test.go b/builtin/providers/google/import_compute_instance_template_test.go new file mode 100644 index 000000000..fc414cd53 --- /dev/null +++ b/builtin/providers/google/import_compute_instance_template_test.go @@ -0,0 +1,114 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccComputeInstanceTemplate_importBasic(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_basic, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importIp(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_ip, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importDisks(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_disks, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetAuto(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + network := "network-" + acctest.RandString(10) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_auto(network), + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccComputeInstanceTemplate_importSubnetCustom(t *testing.T) { + resourceName := "google_compute_instance_template.foobar" + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeInstanceTemplateDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeInstanceTemplate_subnet_custom, + }, + + resource.TestStep{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} diff --git a/builtin/providers/google/resource_compute_instance_template.go 
b/builtin/providers/google/resource_compute_instance_template.go index 4add7124d..9b448f1a9 100644 --- a/builtin/providers/google/resource_compute_instance_template.go +++ b/builtin/providers/google/resource_compute_instance_template.go @@ -3,6 +3,7 @@ package google import ( "fmt" "log" + "strings" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/schema" @@ -15,6 +16,9 @@ func resourceComputeInstanceTemplate() *schema.Resource { Create: resourceComputeInstanceTemplateCreate, Read: resourceComputeInstanceTemplateRead, Delete: resourceComputeInstanceTemplateDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, Schema: map[string]*schema.Schema{ "name": &schema.Schema{ @@ -66,6 +70,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeBool, Optional: true, ForceNew: true, + Computed: true, }, "device_name": &schema.Schema{ @@ -90,6 +95,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "source_image": &schema.Schema{ @@ -102,12 +108,14 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "mode": &schema.Schema{ Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "source": &schema.Schema{ @@ -120,6 +128,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, }, }, @@ -179,6 +188,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "subnetwork": &schema.Schema{ @@ -215,6 +225,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, + Computed: true, }, "region": &schema.Schema{ @@ -226,12 +237,14 @@ func resourceComputeInstanceTemplate() *schema.Resource { "scheduling": &schema.Schema{ Type: schema.TypeList, Optional: true, + Computed: true, ForceNew: true, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "preemptible": &schema.Schema{ Type: schema.TypeBool, Optional: true, + Default: false, ForceNew: true, }, @@ -245,6 +258,7 @@ func resourceComputeInstanceTemplate() *schema.Resource { "on_host_maintenance": &schema.Schema{ Type: schema.TypeString, Optional: true, + Computed: true, ForceNew: true, }, }, @@ -476,6 +490,7 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac instanceProperties.Scheduling = &compute.Scheduling{} instanceProperties.Scheduling.OnHostMaintenance = "MIGRATE" + // Depreciated fields if v, ok := d.GetOk("automatic_restart"); ok { instanceProperties.Scheduling.AutomaticRestart = v.(bool) } @@ -570,9 +585,91 @@ func resourceComputeInstanceTemplateCreate(d *schema.ResourceData, meta interfac return resourceComputeInstanceTemplateRead(d, meta) } +func flattenDisks(disks []*compute.AttachedDisk) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(disks)) + for _, disk := range disks { + diskMap := make(map[string]interface{}) + if disk.InitializeParams != nil { + sourceImageUrl := strings.Split(disk.InitializeParams.SourceImage, "/") + diskMap["source_image"] = sourceImageUrl[len(sourceImageUrl)-1] + diskMap["disk_type"] = disk.InitializeParams.DiskType + diskMap["disk_name"] = disk.InitializeParams.DiskName + diskMap["disk_size_gb"] = disk.InitializeParams.DiskSizeGb + } + 
diskMap["auto_delete"] = disk.AutoDelete + diskMap["boot"] = disk.Boot + diskMap["device_name"] = disk.DeviceName + diskMap["interface"] = disk.Interface + diskMap["source"] = disk.Source + diskMap["mode"] = disk.Mode + diskMap["type"] = disk.Type + result = append(result, diskMap) + } + return result +} + +func flattenNetworkInterfaces(networkInterfaces []*compute.NetworkInterface) ([]map[string]interface{}, string) { + result := make([]map[string]interface{}, 0, len(networkInterfaces)) + region := "" + for _, networkInterface := range networkInterfaces { + networkInterfaceMap := make(map[string]interface{}) + if networkInterface.Network != "" { + networkUrl := strings.Split(networkInterface.Network, "/") + networkInterfaceMap["network"] = networkUrl[len(networkUrl)-1] + } + if networkInterface.Subnetwork != "" { + subnetworkUrl := strings.Split(networkInterface.Subnetwork, "/") + networkInterfaceMap["subnetwork"] = subnetworkUrl[len(subnetworkUrl)-1] + region = subnetworkUrl[len(subnetworkUrl)-3] + } + + if networkInterface.AccessConfigs != nil { + accessConfigsMap := make([]map[string]interface{}, 0, len(networkInterface.AccessConfigs)) + for _, accessConfig := range networkInterface.AccessConfigs { + accessConfigMap := make(map[string]interface{}) + accessConfigMap["nat_ip"] = accessConfig.NatIP + + accessConfigsMap = append(accessConfigsMap, accessConfigMap) + } + networkInterfaceMap["access_config"] = accessConfigsMap + } + result = append(result, networkInterfaceMap) + } + return result, region +} + +func flattenScheduling(scheduling *compute.Scheduling) ([]map[string]interface{}, bool) { + result := make([]map[string]interface{}, 0, 1) + schedulingMap := make(map[string]interface{}) + schedulingMap["automatic_restart"] = scheduling.AutomaticRestart + schedulingMap["on_host_maintenance"] = scheduling.OnHostMaintenance + schedulingMap["preemptible"] = scheduling.Preemptible + result = append(result, schedulingMap) + return result, scheduling.AutomaticRestart +} + +func flattenServiceAccounts(serviceAccounts []*compute.ServiceAccount) []map[string]interface{} { + result := make([]map[string]interface{}, 0, len(serviceAccounts)) + for _, serviceAccount := range serviceAccounts { + serviceAccountMap := make(map[string]interface{}) + serviceAccountMap["email"] = serviceAccount.Email + serviceAccountMap["scopes"] = serviceAccount.Scopes + + result = append(result, serviceAccountMap) + } + return result +} + +func flattenMetadata(metadata *compute.Metadata) map[string]string { + metadataMap := make(map[string]string) + for _, item := range metadata.Items { + metadataMap[item.Key] = *item.Value + } + return metadataMap +} + func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{}) error { config := meta.(*Config) - project, err := getProject(d, config) if err != nil { return err @@ -603,6 +700,36 @@ func resourceComputeInstanceTemplateRead(d *schema.ResourceData, meta interface{ } d.Set("self_link", instanceTemplate.SelfLink) d.Set("name", instanceTemplate.Name) + if instanceTemplate.Properties.Disks != nil { + d.Set("disk", flattenDisks(instanceTemplate.Properties.Disks)) + } + d.Set("description", instanceTemplate.Description) + d.Set("machine_type", instanceTemplate.Properties.MachineType) + d.Set("can_ip_forward", instanceTemplate.Properties.CanIpForward) + if instanceTemplate.Properties.Metadata != nil { + d.Set("metadata", flattenMetadata(instanceTemplate.Properties.Metadata)) + } + d.Set("instance_description", instanceTemplate.Properties.Description) + 
d.Set("project", project) + if instanceTemplate.Properties.NetworkInterfaces != nil { + networkInterfaces, region := flattenNetworkInterfaces(instanceTemplate.Properties.NetworkInterfaces) + d.Set("network_interface", networkInterfaces) + // region is where to look up the subnetwork if there is one attached to the instance template + if region != "" { + d.Set("region", region) + } + } + if instanceTemplate.Properties.Scheduling != nil { + scheduling, autoRestart := flattenScheduling(instanceTemplate.Properties.Scheduling) + d.Set("scheduling", scheduling) + d.Set("automatic_restart", autoRestart) + } + if instanceTemplate.Properties.Tags != nil { + d.Set("tags", instanceTemplate.Properties.Tags.Items) + } + if instanceTemplate.Properties.ServiceAccounts != nil { + d.Set("service_account", flattenServiceAccounts(instanceTemplate.Properties.ServiceAccounts)) + } return nil } diff --git a/website/source/docs/providers/google/r/compute_instance_template.html.markdown b/website/source/docs/providers/google/r/compute_instance_template.html.markdown index be56da80e..2ce38389e 100644 --- a/website/source/docs/providers/google/r/compute_instance_template.html.markdown +++ b/website/source/docs/providers/google/r/compute_instance_template.html.markdown @@ -226,7 +226,8 @@ The `scheduling` block supports: * `on_host_maintenance` - (Optional) Defines the maintenance behavior for this instance. -* `preemptible` - (Optional) Allows instance to be preempted. Read more on this +* `preemptible` - (Optional) Allows instance to be preempted. This defaults to + false. Read more on this [here](https://cloud.google.com/compute/docs/instances/preemptible). ## Attributes Reference From 73b10c81869e71124e6efeececb9fcec9040786b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 15:11:52 +0100 Subject: [PATCH 0721/1238] provider/aws: `aws_security_group` now creates tags as early as possible (#7849) in the process Fixes #7577 7577 discovered that sometimes setting tags at the end of the creation model doesn't quite work for everyone. We now move that further up the tree by calling the setTags func a second time. The setTags func in the Update is not called immediately after creation as we check for it not being a NewResource ``` % make testacc TEST=./builtin/providers/aws TESTARGS='-run=TestAccAWSSecurityGroup_' ==> Checking that code complies with gofmt requirements... go generate $(go list ./... 
| grep -v /terraform/vendor/) TF_ACC=1 go test ./builtin/providers/aws -v -run=TestAccAWSSecurityGroup_ -timeout 120m === RUN TestAccAWSSecurityGroup_importBasic --- PASS: TestAccAWSSecurityGroup_importBasic (60.96s) === RUN TestAccAWSSecurityGroup_importSelf --- PASS: TestAccAWSSecurityGroup_importSelf (72.72s) === RUN TestAccAWSSecurityGroup_basic --- PASS: TestAccAWSSecurityGroup_basic (62.33s) === RUN TestAccAWSSecurityGroup_namePrefix --- PASS: TestAccAWSSecurityGroup_namePrefix (22.12s) === RUN TestAccAWSSecurityGroup_self --- PASS: TestAccAWSSecurityGroup_self (64.26s) === RUN TestAccAWSSecurityGroup_vpc --- PASS: TestAccAWSSecurityGroup_vpc (58.35s) === RUN TestAccAWSSecurityGroup_vpcNegOneIngress --- PASS: TestAccAWSSecurityGroup_vpcNegOneIngress (54.95s) === RUN TestAccAWSSecurityGroup_MultiIngress --- PASS: TestAccAWSSecurityGroup_MultiIngress (64.81s) === RUN TestAccAWSSecurityGroup_Change --- PASS: TestAccAWSSecurityGroup_Change (96.86s) === RUN TestAccAWSSecurityGroup_generatedName --- PASS: TestAccAWSSecurityGroup_generatedName (60.75s) === RUN TestAccAWSSecurityGroup_DefaultEgress_VPC --- PASS: TestAccAWSSecurityGroup_DefaultEgress_VPC (57.05s) === RUN TestAccAWSSecurityGroup_DefaultEgress_Classic --- PASS: TestAccAWSSecurityGroup_DefaultEgress_Classic (20.94s) === RUN TestAccAWSSecurityGroup_drift --- PASS: TestAccAWSSecurityGroup_drift (27.39s) === RUN TestAccAWSSecurityGroup_drift_complex --- PASS: TestAccAWSSecurityGroup_drift_complex (64.62s) === RUN TestAccAWSSecurityGroup_tags --- PASS: TestAccAWSSecurityGroup_tags (87.49s) === RUN TestAccAWSSecurityGroup_CIDRandGroups --- PASS: TestAccAWSSecurityGroup_CIDRandGroups (71.62s) === RUN TestAccAWSSecurityGroup_ingressWithCidrAndSGs --- PASS: TestAccAWSSecurityGroup_ingressWithCidrAndSGs (69.60s) === RUN TestAccAWSSecurityGroup_ingressWithCidrAndSGs_classic --- PASS: TestAccAWSSecurityGroup_ingressWithCidrAndSGs_classic (25.47s) === RUN TestAccAWSSecurityGroup_egressWithPrefixList --- PASS: TestAccAWSSecurityGroup_egressWithPrefixList (64.46s) === RUN TestAccAWSSecurityGroup_failWithDiffMismatch --- PASS: TestAccAWSSecurityGroup_failWithDiffMismatch (60.21s) PASS ok github.com/hashicorp/terraform/builtin/providers/aws 1166.983s ``` --- .../aws/resource_aws_security_group.go | 13 +++-- .../aws/resource_aws_security_group_test.go | 54 ++++++++++++++++++- 2 files changed, 62 insertions(+), 5 deletions(-) diff --git a/builtin/providers/aws/resource_aws_security_group.go b/builtin/providers/aws/resource_aws_security_group.go index a4773e506..236abbd16 100644 --- a/builtin/providers/aws/resource_aws_security_group.go +++ b/builtin/providers/aws/resource_aws_security_group.go @@ -239,6 +239,10 @@ func resourceAwsSecurityGroupCreate(d *schema.ResourceData, meta interface{}) er d.Id(), err) } + if err := setTags(conn, d); err != nil { + return err + } + // AWS defaults all Security Groups to have an ALLOW ALL egress rule. Here we // revoke that rule, so users don't unknowingly have/use it. 
group := resp.(*ec2.SecurityGroup) @@ -340,12 +344,13 @@ func resourceAwsSecurityGroupUpdate(d *schema.ResourceData, meta interface{}) er } } - if err := setTags(conn, d); err != nil { - return err + if !d.IsNewResource() { + if err := setTags(conn, d); err != nil { + return err + } + d.SetPartial("tags") } - d.SetPartial("tags") - return resourceAwsSecurityGroupRead(d, meta) } diff --git a/builtin/providers/aws/resource_aws_security_group_test.go b/builtin/providers/aws/resource_aws_security_group_test.go index c361917f9..34dece107 100644 --- a/builtin/providers/aws/resource_aws_security_group_test.go +++ b/builtin/providers/aws/resource_aws_security_group_test.go @@ -3,6 +3,7 @@ package aws import ( "fmt" "reflect" + "regexp" "strings" "testing" @@ -287,6 +288,26 @@ func TestAccAWSSecurityGroup_basic(t *testing.T) { }) } +func TestAccAWSSecurityGroup_tagsCreatedFirst(t *testing.T) { + var group ec2.SecurityGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckAWSSecurityGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccAWSSecurityGroupConfigForTagsOrdering, + ExpectError: regexp.MustCompile("InvalidParameterValue"), + Check: resource.ComposeTestCheckFunc( + testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), + testAccCheckTags(&group.Tags, "Name", "tf-acc-test"), + ), + }, + }, + }) +} + func TestAccAWSSecurityGroup_namePrefix(t *testing.T) { var group ec2.SecurityGroup @@ -791,6 +812,7 @@ func TestAccAWSSecurityGroup_tags(t *testing.T) { testAccCheckAWSSecurityGroupExists("aws_security_group.foo", &group), testAccCheckTags(&group.Tags, "foo", ""), testAccCheckTags(&group.Tags, "bar", "baz"), + testAccCheckTags(&group.Tags, "env", "Production"), ), }, }, @@ -1056,6 +1078,35 @@ func TestAccAWSSecurityGroup_failWithDiffMismatch(t *testing.T) { }) } +const testAccAWSSecurityGroupConfigForTagsOrdering = ` +resource "aws_vpc" "foo" { + cidr_block = "10.1.0.0/16" +} + +resource "aws_security_group" "web" { + name = "terraform_acceptance_test_example" + description = "Used in the terraform acceptance tests" + vpc_id = "${aws_vpc.foo.id}" + + ingress { + protocol = "6" + from_port = 80 + to_port = 80000 + cidr_blocks = ["10.0.0.0/8"] + } + + egress { + protocol = "tcp" + from_port = 80 + to_port = 8000 + cidr_blocks = ["10.0.0.0/8"] + } + + tags { + Name = "tf-acc-test" + } +}` + const testAccAWSSecurityGroupConfig = ` resource "aws_vpc" "foo" { cidr_block = "10.1.0.0/16" @@ -1305,6 +1356,7 @@ resource "aws_security_group" "foo" { tags { bar = "baz" + env = "Production" } } ` @@ -1736,7 +1788,7 @@ resource "aws_security_group" "egress" { name = "terraform_acceptance_test_prefix_list_egress" description = "Used in the terraform acceptance tests" vpc_id = "${aws_vpc.tf_sg_prefix_list_egress_test.id}" - + egress { protocol = "-1" from_port = 0 From 85c0105c9ec71db451ae53ba3b76e6a1eb802fa1 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 15:13:18 +0100 Subject: [PATCH 0722/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2466482bb..19da50793 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -59,6 +59,7 @@ BUG FIXES: * provider/aws: Fixing IAM data source policy generation to prevent spurious diffs [GH-6956] * provider/aws: Correct how CORS rules are handled in `aws_s3_bucket` [GH-8096] * provider/aws: allow numeric characters in RedshiftClusterDbName [GH-8178] + * 
provider/aws: `aws_security_group` now creates tags as early as possible in the process [GH-7849] * provider/digitalocean: trim whitespace from ssh key [GH-8173] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] From eac6546e33c30034e5507ddc21747459eaa62003 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 15:52:48 +0100 Subject: [PATCH 0723/1238] provider/digitalocean: Enforce Lowercase on IPV6 Addresses (#7652) IPV6 Addresses are generally case insensitive but it is recommented to store them as lowercase (https://tools.ietf.org/html/rfc5952#section-4.3) When Terraform didn't store them as LowerCase, we got the following error when using in DNS records: ``` -/+ digitalocean_record.web6 domain: "mydomain.com" => "mydomain.com" fqdn: "web02.in.mydomain.com" => "" name: "web02.in" => "web02.in" port: "0" => "" priority: "0" => "" type: "AAAA" => "AAAA" value: "2a03:b0c0:0003:00d0:0000:0000:0b66:6001" => "2A03:B0C0:0003:00D0:0000:0000:0B66:6001" (forces new resource) weight: "0" => "" ``` There was no need for this to be the case. We now enforce lowercase on both state and also when responses are returned from the API --- .../digitalocean/resource_digitalocean_droplet.go | 5 ++++- .../digitalocean/resource_digitalocean_droplet_test.go | 10 ++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet.go b/builtin/providers/digitalocean/resource_digitalocean_droplet.go index a44f3eb3a..a33e20d88 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet.go @@ -76,6 +76,9 @@ func resourceDigitalOceanDroplet() *schema.Resource { "ipv6_address": &schema.Schema{ Type: schema.TypeString, Computed: true, + StateFunc: func(val interface{}) string { + return strings.ToLower(val.(string)) + }, }, "ipv6_address_private": &schema.Schema{ @@ -253,7 +256,7 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e if publicIPv6 := findIPv6AddrByType(droplet, "public"); publicIPv6 != "" { d.Set("ipv6", true) - d.Set("ipv6_address", publicIPv6) + d.Set("ipv6_address", strings.ToLower(publicIPv6)) d.Set("ipv6_address_private", findIPv6AddrByType(droplet, "private")) } diff --git a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go index d6f2b190d..9ac1473c1 100644 --- a/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go +++ b/builtin/providers/digitalocean/resource_digitalocean_droplet_test.go @@ -260,6 +260,16 @@ func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *go return fmt.Errorf("No ipv6 public: %s", findIPv6AddrByType(droplet, "public")) } + for _, rs := range s.RootModule().Resources { + if rs.Type != "digitalocean_droplet" { + continue + } + if rs.Primary.Attributes["ipv6_address"] != strings.ToLower(findIPv6AddrByType(droplet, "public")) { + return fmt.Errorf("IPV6 Address should be lowercase") + } + + } + return nil } } From f2e18cbe4ab00b45bb694b45e4f4d35eaed50bf2 Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 15:54:15 +0100 Subject: [PATCH 0724/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 19da50793..52c031bcd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -61,6 +61,7 
@@ BUG FIXES: * provider/aws: allow numeric characters in RedshiftClusterDbName [GH-8178] * provider/aws: `aws_security_group` now creates tags as early as possible in the process [GH-7849] * provider/digitalocean: trim whitespace from ssh key [GH-8173] + * provider/digitalocean: Enforce Lowercase on IPV6 Addresses [GH-7652] * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource [GH-8179] From 9a39057a4fbd93c9ae62f4a6fb1374d12fbb982b Mon Sep 17 00:00:00 2001 From: Paul Stack Date: Mon, 15 Aug 2016 17:06:50 +0100 Subject: [PATCH 0725/1238] docs/import: Add a note to the Import section that only ENV VARs can be used atm (#8194) --- website/source/docs/import/usage.html.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/website/source/docs/import/usage.html.md b/website/source/docs/import/usage.html.md index 8e22da3b1..101141db6 100644 --- a/website/source/docs/import/usage.html.md +++ b/website/source/docs/import/usage.html.md @@ -22,6 +22,9 @@ $ terraform import aws_instance.bar i-abcd1234 ... ``` +~> **Note:** In order to import resources, the provider should be configured with environment variables. +We currently do not support passing credentials directly to the provider. + The above command imports an AWS instance with the given ID to the address `aws_instance.bar`. You can also import resources into modules. See the [resource addressing](/docs/internals/resource-addressing.html) From 6aa3bb574aaccb98c6fa04ecc82d1805166b9fab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Kristinn=20=C3=96rn=20Sigur=C3=B0sson?= Date: Mon, 15 Aug 2016 18:43:54 +0200 Subject: [PATCH 0726/1238] provider/vSphere: Fix for IPv6 only environment creation. (#7643) The code only waited until one or more IPv4 interfaces came online. If you only had IPv6 interfaces attached to your machine, then the machine creation process would completely stall. 
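For reference, the kind of IPv6-only machine definition affected by this fix looks roughly like the sketch below; the port group label, addresses, gateway, datastore, and template names are placeholders rather than values taken from this patch or its tests. Before this change, creating such a machine would stall waiting for an IPv4 address that never appears.

```hcl
resource "vsphere_virtual_machine" "ipv6_only" {
  name   = "terraform-test-ipv6-only"
  vcpu   = 2
  memory = 1024

  network_interface {
    label              = "VM Network"     # placeholder port group
    ipv6_address       = "2001:db8::10"   # placeholder address
    ipv6_prefix_length = 64
    ipv6_gateway       = "2001:db8::1"    # placeholder gateway
  }

  disk {
    datastore = "datastore1"         # placeholder datastore
    template  = "centos-7-template"  # placeholder template
    iops      = 500
  }
}
```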
--- .../resource_vsphere_virtual_machine.go | 6 +- .../resource_vsphere_virtual_machine_test.go | 72 +++++++++++++++---- 2 files changed, 64 insertions(+), 14 deletions(-) diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go index 3de90d502..1fc84789b 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine.go @@ -925,10 +925,14 @@ func resourceVSphereVirtualMachineRead(d *schema.ResourceData, meta interface{}) if state == types.VirtualMachinePowerStatePoweredOn { // wait for interfaces to appear - _, err = vm.WaitForNetIP(context.TODO(), true) + log.Printf("[DEBUG] Waiting for interfaces to appear") + + _, err = vm.WaitForNetIP(context.TODO(), false) if err != nil { return err } + + log.Printf("[DEBUG] Successfully waited for interfaces to appear") } var mvm mo.VirtualMachine diff --git a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go index a8c4602ab..7ce5869d5 100644 --- a/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go +++ b/builtin/providers/vsphere/resource_vsphere_virtual_machine_test.go @@ -869,17 +869,15 @@ func TestAccVSphereVirtualMachine_updateVcpu(t *testing.T) { }) } -const testAccCheckVSphereVirtualMachineConfig_ipv4Andipv6 = ` -resource "vsphere_virtual_machine" "ipv4ipv6" { - name = "terraform-test-ipv4-ipv6" +const testAccCheckVSphereVirtualMachineConfig_ipv6 = ` +resource "vsphere_virtual_machine" "ipv6" { + name = "terraform-test-ipv6" %s vcpu = 2 memory = 1024 network_interface { label = "%s" - ipv4_address = "%s" - ipv4_prefix_length = %s - ipv4_gateway = "%s" + %s ipv6_address = "%s" ipv6_prefix_length = 64 ipv6_gateway = "%s" @@ -900,24 +898,28 @@ resource "vsphere_virtual_machine" "ipv4ipv6" { func TestAccVSphereVirtualMachine_ipv4Andipv6(t *testing.T) { var vm virtualMachine data := setupTemplateBasicBodyVars() - log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_ipv4Andipv6) + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_ipv6) - vmName := "vsphere_virtual_machine.ipv4ipv6" + vmName := "vsphere_virtual_machine.ipv6" test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := - TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "2", vmResource: "terraform-test-ipv4-ipv6"}.testCheckFuncBasic() + TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "2", vmResource: "terraform-test-ipv6"}.testCheckFuncBasic() // FIXME test for this or warn?? 
ipv6Address := os.Getenv("VSPHERE_IPV6_ADDRESS") ipv6Gateway := os.Getenv("VSPHERE_IPV6_GATEWAY") + ipv4Settings := fmt.Sprintf(` + ipv4_address = "%s" + ipv4_prefix_length = %s + ipv4_gateway = "%s" + `, data.ipv4IpAddress, data.ipv4Prefix, data.ipv4Gateway) + config := fmt.Sprintf( - testAccCheckVSphereVirtualMachineConfig_ipv4Andipv6, + testAccCheckVSphereVirtualMachineConfig_ipv6, data.locationOpt, data.label, - data.ipv4IpAddress, - data.ipv4Prefix, - data.ipv4Gateway, + ipv4Settings, ipv6Address, ipv6Gateway, data.datastoreOpt, @@ -945,6 +947,50 @@ func TestAccVSphereVirtualMachine_ipv4Andipv6(t *testing.T) { }) } +func TestAccVSphereVirtualMachine_ipv6Only(t *testing.T) { + var vm virtualMachine + data := setupTemplateBasicBodyVars() + log.Printf("[DEBUG] template= %s", testAccCheckVSphereVirtualMachineConfig_ipv6) + + vmName := "vsphere_virtual_machine.ipv6" + + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label := + TestFuncData{vm: vm, label: data.label, vmName: vmName, numDisks: "2", vmResource: "terraform-test-ipv6"}.testCheckFuncBasic() + + // Checks for this will be handled when this code is merged with https://github.com/hashicorp/terraform/pull/7575. + ipv6Address := os.Getenv("VSPHERE_IPV6_ADDRESS") + ipv6Gateway := os.Getenv("VSPHERE_IPV6_GATEWAY") + + config := fmt.Sprintf( + testAccCheckVSphereVirtualMachineConfig_ipv6, + data.locationOpt, + data.label, + "", + ipv6Address, + ipv6Gateway, + data.datastoreOpt, + data.template, + ) + + log.Printf("[DEBUG] template config= %s", config) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVSphereVirtualMachineDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + test_exists, test_name, test_cpu, test_uuid, test_mem, test_num_disk, test_num_of_nic, test_nic_label, + resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_address", ipv6Address), + resource.TestCheckResourceAttr(vmName, "network_interface.0.ipv6_gateway", ipv6Gateway), + ), + }, + }, + }) +} + const testAccCheckVSphereVirtualMachineConfig_updateAddDisks = ` resource "vsphere_virtual_machine" "foo" { name = "terraform-test" From 1af3e4fc0c5b28a248ac345b4dbe59e137237590 Mon Sep 17 00:00:00 2001 From: Clint Date: Mon, 15 Aug 2016 11:44:30 -0500 Subject: [PATCH 0727/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52c031bcd..0e5927565 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -65,6 +65,7 @@ BUG FIXES: * provider/google: Use resource specific project when making queries/changes [GH-7029] * provider/google: Fix read for the backend service resource [GH-7476] * provider/openstack: Fix typo in OpenStack LBaaSv2 pool resource [GH-8179] + * provider/vSphere: Fix for IPv6 only environment creation [GH-7643] ## 0.7.0 (August 2, 2016) From 2aa28c34cab7a9c0113549cc63c02c915f375a27 Mon Sep 17 00:00:00 2001 From: Srikalyan Swayampakula Date: Mon, 15 Aug 2016 22:22:42 +0530 Subject: [PATCH 0728/1238] Not Error out on AWS Lambda VPC config if both subnet_ids and security_group_ids are empty. (#6191) AWS Lambda VPC config is an optional configuration and which needs to both subnet_ids and security_group_ids to tie the lambda function to a VPC. 
We should make it optional if both subnet_ids and security_group_ids are not net which would add better flexiblity in creation of more useful modules as there are "if else" checks. Without this we are creating duplicate modules one with VPC and one without VPC resulting in various anomalies. --- .../aws/resource_aws_lambda_function.go | 29 ++++++++++++------- .../aws/r/lambda_function.html.markdown | 2 ++ 2 files changed, 20 insertions(+), 11 deletions(-) diff --git a/builtin/providers/aws/resource_aws_lambda_function.go b/builtin/providers/aws/resource_aws_lambda_function.go index 486f45d2e..4d74ca3f0 100644 --- a/builtin/providers/aws/resource_aws_lambda_function.go +++ b/builtin/providers/aws/resource_aws_lambda_function.go @@ -181,19 +181,21 @@ func resourceAwsLambdaFunctionCreate(d *schema.ResourceData, meta interface{}) e return err } - var subnetIds []*string - for _, id := range config["subnet_ids"].(*schema.Set).List() { - subnetIds = append(subnetIds, aws.String(id.(string))) - } + if config != nil { + var subnetIds []*string + for _, id := range config["subnet_ids"].(*schema.Set).List() { + subnetIds = append(subnetIds, aws.String(id.(string))) + } - var securityGroupIds []*string - for _, id := range config["security_group_ids"].(*schema.Set).List() { - securityGroupIds = append(securityGroupIds, aws.String(id.(string))) - } + var securityGroupIds []*string + for _, id := range config["security_group_ids"].(*schema.Set).List() { + securityGroupIds = append(securityGroupIds, aws.String(id.(string))) + } - params.VpcConfig = &lambda.VpcConfig{ - SubnetIds: subnetIds, - SecurityGroupIds: securityGroupIds, + params.VpcConfig = &lambda.VpcConfig{ + SubnetIds: subnetIds, + SecurityGroupIds: securityGroupIds, + } } } @@ -402,6 +404,11 @@ func validateVPCConfig(v interface{}) (map[string]interface{}, error) { return nil, errors.New("vpc_config is ") } + // if subnet_ids and security_group_ids are both empty then the VPC is optional + if config["subnet_ids"].(*schema.Set).Len() == 0 && config["security_group_ids"].(*schema.Set).Len() == 0 { + return nil, nil + } + if config["subnet_ids"].(*schema.Set).Len() == 0 { return nil, errors.New("vpc_config.subnet_ids cannot be empty") } diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown index 9466565da..16e3d35b0 100644 --- a/website/source/docs/providers/aws/r/lambda_function.html.markdown +++ b/website/source/docs/providers/aws/r/lambda_function.html.markdown @@ -65,6 +65,8 @@ resource "aws_lambda_function" "test_lambda" { * `subnet_ids` - (Required) A list of subnet IDs associated with the Lambda function. * `security_group_ids` - (Required) A list of security group IDs associated with the Lambda function. +~> **NOTE:** if both `subnet_ids` and `security_group_ids` are empty then vpc_config is considered to be empty or unset. + ## Attributes Reference * `arn` - The Amazon Resource Name (ARN) identifying your Lambda Function. 
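To illustrate the behaviour this patch enables, a function whose `vpc_config` lists are both empty (for example, populated from module variables that default to empty lists) is now treated as a non-VPC function instead of failing validation. The sketch below uses placeholder values for the artifact, role ARN, and handler.

```hcl
resource "aws_lambda_function" "example" {
  filename      = "lambda.zip"                                  # placeholder artifact
  function_name = "example_lambda"
  role          = "arn:aws:iam::123456789012:role/lambda_role"  # placeholder role ARN
  handler       = "exports.handler"
  runtime       = "nodejs4.3"

  # With both lists empty, vpc_config is considered unset and the
  # function is created outside of a VPC rather than raising an error.
  vpc_config {
    subnet_ids         = []
    security_group_ids = []
  }
}
```

If either list is non-empty, the other is still required, since a usable VPC configuration needs both subnet IDs and security group IDs.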
From 68495ed8e61606921679c7ab29376102ec1b84a6 Mon Sep 17 00:00:00 2001 From: Radek Simko Date: Mon, 15 Aug 2016 17:55:40 +0100 Subject: [PATCH 0729/1238] Update CHANGELOG.md --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 0e5927565..fc91ed022 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,7 @@ IMPROVEMENTS * provider/aws: Add support for `promotion_tier` to `aws_rds_cluster_instance` [GH-8087] * provider/aws: Allow specifying custom S3 endpoint and enforcing S3 path style URLs via new provider options [GH-7871] * provider/aws: Add ability to set Storage Class in `aws_s3_bucket_object` [GH-8174] + * provider/aws: Treat `aws_lambda_function` w/ empty `subnet_ids` & `security_groups_ids` in `vpc_config` as VPC-disabled function [GH-6191] * provider/azure: add custom_data argument for azure_instance resource [GH-8158] * provider/azurerm: Adds support for uploading blobs to azure storage from local source [GH-7994] * provider/azurerm: Storage blob contents can be copied from an existing blob [GH-8126] From e18b1962a92d01c0fc9d79fddb62854675230b6e Mon Sep 17 00:00:00 2001 From: Andy Royle Date: Mon, 15 Aug 2016 18:00:00 +0100 Subject: [PATCH 0730/1238] provider/azurerm: Add support for servicebus namespaces (#8195) * add dep for servicebus client from azure-sdk-for-node * add servicebus namespaces support * add docs for servicebus_namespaces * add Microsoft.ServiceBus to providers list --- builtin/providers/azurerm/config.go | 9 + builtin/providers/azurerm/provider.go | 3 +- .../resource_arm_servicebus_namespace.go | 176 ++++ .../resource_arm_servicebus_namespace_test.go | 162 ++++ .../azure-sdk-for-go/arm/servicebus/client.go | 58 ++ .../azure-sdk-for-go/arm/servicebus/models.go | 506 +++++++++++ .../arm/servicebus/namespaces.go | 825 ++++++++++++++++++ .../azure-sdk-for-go/arm/servicebus/queues.go | 746 ++++++++++++++++ .../arm/servicebus/subscriptions.go | 330 +++++++ .../azure-sdk-for-go/arm/servicebus/topics.go | 751 ++++++++++++++++ .../arm/servicebus/version.go | 43 + vendor/vendor.json | 6 + .../r/servicebus_namespace.html.markdown | 55 ++ 13 files changed, 3669 insertions(+), 1 deletion(-) create mode 100644 builtin/providers/azurerm/resource_arm_servicebus_namespace.go create mode 100644 builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/queues.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/subscriptions.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/topics.go create mode 100644 vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go create mode 100644 website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown diff --git a/builtin/providers/azurerm/config.go b/builtin/providers/azurerm/config.go index 04d6403c8..4783ce698 100644 --- a/builtin/providers/azurerm/config.go +++ b/builtin/providers/azurerm/config.go @@ -11,6 +11,7 @@ import ( "github.com/Azure/azure-sdk-for-go/arm/network" "github.com/Azure/azure-sdk-for-go/arm/resources/resources" "github.com/Azure/azure-sdk-for-go/arm/scheduler" + "github.com/Azure/azure-sdk-for-go/arm/servicebus" "github.com/Azure/azure-sdk-for-go/arm/storage" 
"github.com/Azure/azure-sdk-for-go/arm/trafficmanager" mainStorage "github.com/Azure/azure-sdk-for-go/storage" @@ -66,6 +67,8 @@ type ArmClient struct { trafficManagerProfilesClient trafficmanager.ProfilesClient trafficManagerEndpointsClient trafficmanager.EndpointsClient + + serviceBusNamespacesClient servicebus.NamespacesClient } func withRequestLogging() autorest.SendDecorator { @@ -348,6 +351,12 @@ func (c *Config) getArmClient() (*ArmClient, error) { tmec.Sender = autorest.CreateSender(withRequestLogging()) client.trafficManagerEndpointsClient = tmec + sbnc := servicebus.NewNamespacesClient(c.SubscriptionID) + setUserAgent(&sbnc.Client) + sbnc.Authorizer = spt + sbnc.Sender = autorest.CreateSender(withRequestLogging()) + client.serviceBusNamespacesClient = sbnc + return &client, nil } diff --git a/builtin/providers/azurerm/provider.go b/builtin/providers/azurerm/provider.go index e235fe7a6..b208c8e21 100644 --- a/builtin/providers/azurerm/provider.go +++ b/builtin/providers/azurerm/provider.go @@ -56,6 +56,7 @@ func Provider() terraform.ResourceProvider { "azurerm_public_ip": resourceArmPublicIp(), "azurerm_route": resourceArmRoute(), "azurerm_route_table": resourceArmRouteTable(), + "azurerm_servicebus_namespace": resourceArmServiceBusNamespace(), "azurerm_storage_account": resourceArmStorageAccount(), "azurerm_storage_blob": resourceArmStorageBlob(), "azurerm_storage_container": resourceArmStorageContainer(), @@ -176,7 +177,7 @@ func registerAzureResourceProvidersWithSubscription(client *riviera.Client) erro var err error providerRegistrationOnce.Do(func() { // We register Microsoft.Compute during client initialization - providers := []string{"Microsoft.Network", "Microsoft.Cdn", "Microsoft.Storage", "Microsoft.Sql", "Microsoft.Search", "Microsoft.Resources"} + providers := []string{"Microsoft.Network", "Microsoft.Cdn", "Microsoft.Storage", "Microsoft.Sql", "Microsoft.Search", "Microsoft.Resources", "Microsoft.ServiceBus"} var wg sync.WaitGroup wg.Add(len(providers)) diff --git a/builtin/providers/azurerm/resource_arm_servicebus_namespace.go b/builtin/providers/azurerm/resource_arm_servicebus_namespace.go new file mode 100644 index 000000000..4ded28d6c --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_servicebus_namespace.go @@ -0,0 +1,176 @@ +package azurerm + +import ( + "fmt" + "log" + "net/http" + "strings" + + "github.com/Azure/azure-sdk-for-go/arm/servicebus" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceArmServiceBusNamespace() *schema.Resource { + return &schema.Resource{ + Create: resourceArmServiceBusNamespaceCreate, + Read: resourceArmServiceBusNamespaceRead, + Update: resourceArmServiceBusNamespaceCreate, + Delete: resourceArmServiceBusNamespaceDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + StateFunc: azureRMNormalizeLocation, + }, + + "resource_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "sku": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validateServiceBusNamespaceSku, + }, + + "capacity": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Default: 1, + ValidateFunc: validateServiceBusNamespaceCapacity, + }, + + "tags": tagsSchema(), + }, + } +} + +func resourceArmServiceBusNamespaceCreate(d 
*schema.ResourceData, meta interface{}) error { + client := meta.(*ArmClient) + namespaceClient := client.serviceBusNamespacesClient + log.Printf("[INFO] preparing arguments for Azure ARM ServiceBus Namespace creation.") + + name := d.Get("name").(string) + location := d.Get("location").(string) + resGroup := d.Get("resource_group_name").(string) + sku := d.Get("sku").(string) + capacity := int32(d.Get("capacity").(int)) + tags := d.Get("tags").(map[string]interface{}) + + parameters := servicebus.NamespaceCreateOrUpdateParameters{ + Location: &location, + Sku: &servicebus.Sku{ + Name: servicebus.SkuName(sku), + Tier: servicebus.SkuTier(sku), + Capacity: &capacity, + }, + Tags: expandTags(tags), + } + + _, err := namespaceClient.CreateOrUpdate(resGroup, name, parameters, make(chan struct{})) + if err != nil { + return err + } + + read, err := namespaceClient.Get(resGroup, name) + if err != nil { + return err + } + + if read.ID == nil { + return fmt.Errorf("Cannot read ServiceBus Namespace %s (resource group %s) ID", name, resGroup) + } + + d.SetId(*read.ID) + + return resourceArmServiceBusNamespaceRead(d, meta) +} + +func resourceArmServiceBusNamespaceRead(d *schema.ResourceData, meta interface{}) error { + namespaceClient := meta.(*ArmClient).serviceBusNamespacesClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["namespaces"] + + resp, err := namespaceClient.Get(resGroup, name) + if resp.StatusCode == http.StatusNotFound { + d.SetId("") + return nil + } + if err != nil { + return fmt.Errorf("Error making Read request on Azure ServiceBus Namespace %s: %s", name, err) + } + + d.Set("name", resp.Name) + d.Set("sku", strings.ToLower(string(resp.Sku.Name))) + d.Set("capacity", resp.Sku.Capacity) + flattenAndSetTags(d, resp.Tags) + + return nil +} + +func resourceArmServiceBusNamespaceDelete(d *schema.ResourceData, meta interface{}) error { + namespaceClient := meta.(*ArmClient).serviceBusNamespacesClient + + id, err := parseAzureResourceID(d.Id()) + if err != nil { + return err + } + resGroup := id.ResourceGroup + name := id.Path["namespaces"] + + resp, err := namespaceClient.Delete(resGroup, name, make(chan struct{})) + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("Error issuing Azure ARM delete request of ServiceBus Namespace'%s': %s", name, err) + } + + return nil +} + +func validateServiceBusNamespaceSku(v interface{}, k string) (ws []string, errors []error) { + value := strings.ToLower(v.(string)) + skus := map[string]bool{ + "basic": true, + "standard": true, + "premium": true, + } + + if !skus[value] { + errors = append(errors, fmt.Errorf("ServiceBus Namespace SKU can only be Basic, Standard or Premium")) + } + return +} + +func validateServiceBusNamespaceCapacity(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + capacities := map[int]bool{ + 1: true, + 2: true, + 4: true, + } + + if !capacities[value] { + errors = append(errors, fmt.Errorf("ServiceBus Namespace Capacity can only be 1, 2 or 4")) + } + return +} diff --git a/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go b/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go new file mode 100644 index 000000000..eccb1fbbd --- /dev/null +++ b/builtin/providers/azurerm/resource_arm_servicebus_namespace_test.go @@ -0,0 +1,162 @@ +package azurerm + +import ( + "fmt" + "net/http" + "testing" + + "github.com/hashicorp/terraform/helper/acctest" + 
"github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccAzureRMServiceBusNamespaceCapacity_validation(t *testing.T) { + cases := []struct { + Value int + ErrCount int + }{ + { + Value: 17, + ErrCount: 1, + }, + { + Value: 1, + ErrCount: 0, + }, + { + Value: 2, + ErrCount: 0, + }, + { + Value: 4, + ErrCount: 0, + }, + } + + for _, tc := range cases { + _, errors := validateServiceBusNamespaceCapacity(tc.Value, "azurerm_servicebus_namespace") + + if len(errors) != tc.ErrCount { + t.Fatalf("Expected the Azure RM ServiceBus Namespace Capacity to trigger a validation error") + } + } +} + +func TestAccAzureRMServiceBusNamespaceSku_validation(t *testing.T) { + cases := []struct { + Value string + ErrCount int + }{ + { + Value: "Basic", + ErrCount: 0, + }, + { + Value: "Standard", + ErrCount: 0, + }, + { + Value: "Premium", + ErrCount: 0, + }, + { + Value: "Random", + ErrCount: 1, + }, + } + + for _, tc := range cases { + _, errors := validateServiceBusNamespaceSku(tc.Value, "azurerm_servicebus_namespace") + + if len(errors) != tc.ErrCount { + t.Fatalf("Expected the Azure RM ServiceBus Namespace Sku to trigger a validation error") + } + } +} + +func TestAccAzureRMServiceBusNamespace_basic(t *testing.T) { + + ri := acctest.RandInt() + config := fmt.Sprintf(testAccAzureRMServiceBusNamespace_basic, ri, ri) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testCheckAzureRMServiceBusNamespaceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: config, + Check: resource.ComposeTestCheckFunc( + testCheckAzureRMServiceBusNamespaceExists("azurerm_servicebus_namespace.test"), + ), + }, + }, + }) +} + +func testCheckAzureRMServiceBusNamespaceDestroy(s *terraform.State) error { + conn := testAccProvider.Meta().(*ArmClient).serviceBusNamespacesClient + + for _, rs := range s.RootModule().Resources { + if rs.Type != "azurerm_servicebus_namespace" { + continue + } + + name := rs.Primary.Attributes["name"] + resourceGroup := rs.Primary.Attributes["resource_group_name"] + + resp, err := conn.Get(resourceGroup, name) + + if err != nil { + return nil + } + + if resp.StatusCode != http.StatusNotFound { + return fmt.Errorf("ServiceBus Namespace still exists:\n%#v", resp.Properties) + } + } + + return nil +} + +func testCheckAzureRMServiceBusNamespaceExists(name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + // Ensure we have enough information in state to look up in API + rs, ok := s.RootModule().Resources[name] + if !ok { + return fmt.Errorf("Not found: %s", name) + } + + namespaceName := rs.Primary.Attributes["name"] + resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"] + if !hasResourceGroup { + return fmt.Errorf("Bad: no resource group found in state for public ip: %s", namespaceName) + } + + conn := testAccProvider.Meta().(*ArmClient).serviceBusNamespacesClient + + resp, err := conn.Get(resourceGroup, namespaceName) + if err != nil { + return fmt.Errorf("Bad: Get on serviceBusNamespacesClient: %s", err) + } + + if resp.StatusCode == http.StatusNotFound { + return fmt.Errorf("Bad: Public IP %q (resource group: %q) does not exist", namespaceName, resourceGroup) + } + + return nil + } +} + +var testAccAzureRMServiceBusNamespace_basic = ` +resource "azurerm_resource_group" "test" { + name = "acctestrg-%d" + location = "West US" +} +resource "azurerm_servicebus_namespace" "test" { + name = 
"acctestservicebusnamespace-%d" + location = "West US" + resource_group_name = "${azurerm_resource_group.test.name}" + sku = "basic" +} +` diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go new file mode 100644 index 000000000..45027a525 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/client.go @@ -0,0 +1,58 @@ +// Package servicebus implements the Azure ARM Servicebus service API version +// 2015-08-01. +// +// Azure Service Bus client +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // APIVersion is the version of the Servicebus + APIVersion = "2015-08-01" + + // DefaultBaseURI is the default URI used for the service Servicebus + DefaultBaseURI = "https://management.azure.com" +) + +// ManagementClient is the base client for Servicebus. +type ManagementClient struct { + autorest.Client + BaseURI string + APIVersion string + SubscriptionID string +} + +// New creates an instance of the ManagementClient client. +func New(subscriptionID string) ManagementClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the ManagementClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) ManagementClient { + return ManagementClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + APIVersion: APIVersion, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go new file mode 100644 index 000000000..b213c71ed --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/models.go @@ -0,0 +1,506 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "net/http" +) + +// AccessRights enumerates the values for access rights. +type AccessRights string + +const ( + // Listen specifies the listen state for access rights. + Listen AccessRights = "Listen" + // Manage specifies the manage state for access rights. + Manage AccessRights = "Manage" + // Send specifies the send state for access rights. + Send AccessRights = "Send" +) + +// AvailabilityStatus enumerates the values for availability status. +type AvailabilityStatus string + +const ( + // Available specifies the available state for availability status. + Available AvailabilityStatus = "Available" + // Limited specifies the limited state for availability status. + Limited AvailabilityStatus = "Limited" + // Renaming specifies the renaming state for availability status. + Renaming AvailabilityStatus = "Renaming" + // Restoring specifies the restoring state for availability status. + Restoring AvailabilityStatus = "Restoring" + // Unknown specifies the unknown state for availability status. + Unknown AvailabilityStatus = "Unknown" +) + +// EntityStatus enumerates the values for entity status. +type EntityStatus string + +const ( + // EntityStatusActive specifies the entity status active state for entity + // status. + EntityStatusActive EntityStatus = "Active" + // EntityStatusCreating specifies the entity status creating state for + // entity status. + EntityStatusCreating EntityStatus = "Creating" + // EntityStatusDeleting specifies the entity status deleting state for + // entity status. + EntityStatusDeleting EntityStatus = "Deleting" + // EntityStatusDisabled specifies the entity status disabled state for + // entity status. + EntityStatusDisabled EntityStatus = "Disabled" + // EntityStatusReceiveDisabled specifies the entity status receive + // disabled state for entity status. + EntityStatusReceiveDisabled EntityStatus = "ReceiveDisabled" + // EntityStatusRenaming specifies the entity status renaming state for + // entity status. + EntityStatusRenaming EntityStatus = "Renaming" + // EntityStatusRestoring specifies the entity status restoring state for + // entity status. + EntityStatusRestoring EntityStatus = "Restoring" + // EntityStatusSendDisabled specifies the entity status send disabled + // state for entity status. + EntityStatusSendDisabled EntityStatus = "SendDisabled" + // EntityStatusUnknown specifies the entity status unknown state for + // entity status. + EntityStatusUnknown EntityStatus = "Unknown" +) + +// Kind enumerates the values for kind. +type Kind string + +const ( + // Messaging specifies the messaging state for kind. + Messaging Kind = "Messaging" +) + +// NamespaceState enumerates the values for namespace state. +type NamespaceState string + +const ( + // NamespaceStateActivating specifies the namespace state activating state + // for namespace state. + NamespaceStateActivating NamespaceState = "Activating" + // NamespaceStateActive specifies the namespace state active state for + // namespace state. + NamespaceStateActive NamespaceState = "Active" + // NamespaceStateCreated specifies the namespace state created state for + // namespace state. + NamespaceStateCreated NamespaceState = "Created" + // NamespaceStateCreating specifies the namespace state creating state for + // namespace state. 
+ NamespaceStateCreating NamespaceState = "Creating" + // NamespaceStateDisabled specifies the namespace state disabled state for + // namespace state. + NamespaceStateDisabled NamespaceState = "Disabled" + // NamespaceStateDisabling specifies the namespace state disabling state + // for namespace state. + NamespaceStateDisabling NamespaceState = "Disabling" + // NamespaceStateEnabling specifies the namespace state enabling state for + // namespace state. + NamespaceStateEnabling NamespaceState = "Enabling" + // NamespaceStateFailed specifies the namespace state failed state for + // namespace state. + NamespaceStateFailed NamespaceState = "Failed" + // NamespaceStateRemoved specifies the namespace state removed state for + // namespace state. + NamespaceStateRemoved NamespaceState = "Removed" + // NamespaceStateRemoving specifies the namespace state removing state for + // namespace state. + NamespaceStateRemoving NamespaceState = "Removing" + // NamespaceStateSoftDeleted specifies the namespace state soft deleted + // state for namespace state. + NamespaceStateSoftDeleted NamespaceState = "SoftDeleted" + // NamespaceStateSoftDeleting specifies the namespace state soft deleting + // state for namespace state. + NamespaceStateSoftDeleting NamespaceState = "SoftDeleting" + // NamespaceStateUnknown specifies the namespace state unknown state for + // namespace state. + NamespaceStateUnknown NamespaceState = "Unknown" +) + +// Policykey enumerates the values for policykey. +type Policykey string + +const ( + // PrimaryKey specifies the primary key state for policykey. + PrimaryKey Policykey = "PrimaryKey" + // SecondayKey specifies the seconday key state for policykey. + SecondayKey Policykey = "SecondayKey" +) + +// SkuName enumerates the values for sku name. +type SkuName string + +const ( + // Basic specifies the basic state for sku name. + Basic SkuName = "Basic" + // Premium specifies the premium state for sku name. + Premium SkuName = "Premium" + // Standard specifies the standard state for sku name. + Standard SkuName = "Standard" +) + +// SkuTier enumerates the values for sku tier. +type SkuTier string + +const ( + // SkuTierBasic specifies the sku tier basic state for sku tier. + SkuTierBasic SkuTier = "Basic" + // SkuTierPremium specifies the sku tier premium state for sku tier. + SkuTierPremium SkuTier = "Premium" + // SkuTierStandard specifies the sku tier standard state for sku tier. + SkuTierStandard SkuTier = "Standard" +) + +// MessageCountDetails is message Count Details. +type MessageCountDetails struct { + ActiveMessageCount *int64 `json:"ActiveMessageCount,omitempty"` + DeadLetterMessageCount *int64 `json:"DeadLetterMessageCount,omitempty"` + ScheduledMessageCount *int64 `json:"ScheduledMessageCount,omitempty"` + TransferDeadLetterMessageCount *int64 `json:"TransferDeadLetterMessageCount,omitempty"` + TransferMessageCount *int64 `json:"TransferMessageCount,omitempty"` +} + +// NamespaceCreateOrUpdateParameters is parameters supplied to the +// CreateOrUpdate Namespace operation. +type NamespaceCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Kind Kind `json:"kind,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// NamespaceListResult is the response of the List Namespace operation. 
+type NamespaceListResult struct { + autorest.Response `json:"-"` + Value *[]NamespaceResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// NamespaceListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client NamespaceListResult) NamespaceListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// NamespaceProperties is properties of the Namespace. +type NamespaceProperties struct { + ProvisioningState *string `json:"provisioningState,omitempty"` + Status NamespaceState `json:"status,omitempty"` + CreatedAt *date.Time `json:"createdAt,omitempty"` + UpdatedAt *date.Time `json:"updatedAt,omitempty"` + ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"` + CreateACSNamespace *bool `json:"createACSNamespace,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +// NamespaceResource is description of a Namespace resource. +type NamespaceResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Kind Kind `json:"kind,omitempty"` + Sku *Sku `json:"sku,omitempty"` + Properties *NamespaceProperties `json:"properties,omitempty"` +} + +// QueueCreateOrUpdateParameters is parameters supplied to the CreateOrUpdate +// Queue operation. +type QueueCreateOrUpdateParameters struct { + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Properties *QueueProperties `json:"properties,omitempty"` +} + +// QueueListResult is the response of the List Queues operation. +type QueueListResult struct { + autorest.Response `json:"-"` + Value *[]QueueResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// QueueListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client QueueListResult) QueueListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// QueueProperties is +type QueueProperties struct { + AccessedAt *date.Time `json:"AccessedAt,omitempty"` + AutoDeleteOnIdle *string `json:"AutoDeleteOnIdle,omitempty"` + AvailabilityStatus AvailabilityStatus `json:"AvailabilityStatus ,omitempty"` + CreatedAt *date.Time `json:"CreatedAt,omitempty"` + DefaultMessageTimeToLive *string `json:"DefaultMessageTimeToLive,omitempty"` + DuplicateDetectionHistoryTimeWindow *string `json:"DuplicateDetectionHistoryTimeWindow ,omitempty"` + EnableBatchedOperations *bool `json:"EnableBatchedOperations,omitempty"` + EnableDeadLetteringOnMessageExpiration *bool `json:"EnableDeadLetteringOnMessageExpiration,omitempty"` + EnableExpress *bool `json:"EnableExpress,omitempty"` + EnablePartitioning *bool `json:"EnablePartitioning,omitempty"` + ForwardDeadLetteredMessagesTo *string `json:"ForwardDeadLetteredMessagesTo,omitempty"` + ForwardTo *string `json:"ForwardTo,omitempty"` + IsAnonymousAccessible *bool `json:"IsAnonymousAccessible,omitempty"` + LockDuration *string `json:"LockDuration ,omitempty"` + MaxDeliveryCount *int32 `json:"MaxDeliveryCount ,omitempty"` + MaxSizeInMegabytes *int64 `json:"MaxSizeInMegabytes ,omitempty"` + MessageCount *int64 `json:"MessageCount ,omitempty"` + MessageCountDetails *MessageCountDetails `json:"MessageCountDetails,omitempty"` + Path *string `json:"Path,omitempty"` + RequiresDuplicateDetection *bool `json:"RequiresDuplicateDetection,omitempty"` + RequiresSession *bool `json:"RequiresSession,omitempty"` + SizeInBytes *int64 `json:"SizeInBytes ,omitempty"` + Status EntityStatus `json:"Status,omitempty"` + SupportOrdering *bool `json:"SupportOrdering,omitempty"` + UpdatedAt *date.Time `json:"UpdatedAt,omitempty"` + UserMetadata *string `json:"UserMetadata,omitempty"` +} + +// QueueResource is description of queue Resource. +type QueueResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *QueueProperties `json:"properties,omitempty"` +} + +// RegenerateKeysParameters is parameters supplied to the Regenerate Auth Rule. +type RegenerateKeysParameters struct { + Policykey Policykey `json:"Policykey,omitempty"` +} + +// Resource is +type Resource struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` +} + +// ResourceListKeys is namespace/ServiceBus Connection String +type ResourceListKeys struct { + autorest.Response `json:"-"` + PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"` + SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"` + PrimaryKey *string `json:"primaryKey,omitempty"` + SecondaryKey *string `json:"secondaryKey,omitempty"` + KeyName *string `json:"keyName,omitempty"` +} + +// SharedAccessAuthorizationRuleCreateOrUpdateParameters is parameters +// supplied to the CreateOrUpdate AuthorizationRules. 
+type SharedAccessAuthorizationRuleCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Name *string `json:"name,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} + +// SharedAccessAuthorizationRuleListResult is the response of the List +// Namespace operation. +type SharedAccessAuthorizationRuleListResult struct { + autorest.Response `json:"-"` + Value *[]SharedAccessAuthorizationRuleResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SharedAccessAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SharedAccessAuthorizationRuleListResult) SharedAccessAuthorizationRuleListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SharedAccessAuthorizationRuleProperties is sharedAccessAuthorizationRule +// properties. +type SharedAccessAuthorizationRuleProperties struct { + Rights *[]AccessRights `json:"rights,omitempty"` +} + +// SharedAccessAuthorizationRuleResource is description of a Namespace +// AuthorizationRules. +type SharedAccessAuthorizationRuleResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *SharedAccessAuthorizationRuleProperties `json:"properties,omitempty"` +} + +// Sku is sku of the Namespace. +type Sku struct { + Name SkuName `json:"name,omitempty"` + Tier SkuTier `json:"tier,omitempty"` + Capacity *int32 `json:"capacity,omitempty"` +} + +// SubscriptionCreateOrUpdateParameters is parameters supplied to the +// CreateOrUpdate Subscription operation. +type SubscriptionCreateOrUpdateParameters struct { + Location *string `json:"location,omitempty"` + Type *string `json:"type,omitempty"` + Properties *SubscriptionProperties `json:"properties,omitempty"` +} + +// SubscriptionListResult is the response of the List Subscriptions operation. +type SubscriptionListResult struct { + autorest.Response `json:"-"` + Value *[]SubscriptionResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// SubscriptionListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. +func (client SubscriptionListResult) SubscriptionListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// SubscriptionProperties is description of Subscription Resource. 
+type SubscriptionProperties struct { + AccessedAt *date.Time `json:"AccessedAt,omitempty"` + AutoDeleteOnIdle *string `json:"AutoDeleteOnIdle,omitempty"` + AvailabilityStatus AvailabilityStatus `json:"AvailabilityStatus ,omitempty"` + CreatedAt *date.Time `json:"CreatedAt,omitempty"` + DefaultMessageTimeToLive *string `json:"DefaultMessageTimeToLive,omitempty"` + EnableBatchedOperations *bool `json:"EnableBatchedOperations,omitempty"` + EnableDeadLetteringOnFilterEvaluationExceptions *bool `json:"EnableDeadLetteringOnFilterEvaluationExceptions,omitempty"` + EnableDeadLetteringOnMessageExpiration *bool `json:"EnableDeadLetteringOnMessageExpiration,omitempty"` + ForwardDeadLetteredMessagesTo *string `json:"ForwardDeadLetteredMessagesTo,omitempty"` + ForwardTo *string `json:"ForwardTo,omitempty"` + IsReadOnly *bool `json:"IsReadOnly,omitempty"` + LockDuration *string `json:"LockDuration,omitempty"` + MaxDeliveryCount *int32 `json:"MaxDeliveryCount,omitempty"` + MessageCount *int64 `json:"MessageCount,omitempty"` + MessageCountDetails *MessageCountDetails `json:"MessageCountDetails,omitempty"` + RequiresSession *bool `json:"RequiresSession,omitempty"` + Status EntityStatus `json:"Status,omitempty"` + TopicPath *string `json:"TopicPath,omitempty"` + UpdatedAt *date.Time `json:"UpdatedAt,omitempty"` + UserMetadata *string `json:"UserMetadata,omitempty"` +} + +// SubscriptionResource is description of Subscription Resource. +type SubscriptionResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *SubscriptionProperties `json:"properties,omitempty"` +} + +// TopicCreateOrUpdateParameters is parameters supplied to the CreateOrUpdate +// Topic operation. +type TopicCreateOrUpdateParameters struct { + Name *string `json:"name,omitempty"` + Location *string `json:"location,omitempty"` + Properties *TopicProperties `json:"properties,omitempty"` +} + +// TopicListResult is the response of the List Topics operation. +type TopicListResult struct { + autorest.Response `json:"-"` + Value *[]TopicResource `json:"value,omitempty"` + NextLink *string `json:"nextLink,omitempty"` +} + +// TopicListResultPreparer prepares a request to retrieve the next set of results. It returns +// nil if no more results exist. 
+func (client TopicListResult) TopicListResultPreparer() (*http.Request, error) { + if client.NextLink == nil || len(to.String(client.NextLink)) <= 0 { + return nil, nil + } + return autorest.Prepare(&http.Request{}, + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(client.NextLink))) +} + +// TopicProperties is +type TopicProperties struct { + AccessedAt *date.Time `json:"AccessedAt,omitempty"` + AutoDeleteOnIdle *string `json:"AutoDeleteOnIdle,omitempty"` + AvailabilityStatus AvailabilityStatus `json:"AvailabilityStatus ,omitempty"` + CreatedAt *date.Time `json:"CreatedAt,omitempty"` + DefaultMessageTimeToLive *string `json:"DefaultMessageTimeToLive,omitempty"` + DuplicateDetectionHistoryTimeWindow *string `json:"DuplicateDetectionHistoryTimeWindow ,omitempty"` + EnableBatchedOperations *bool `json:"EnableBatchedOperations,omitempty"` + EnableExpress *bool `json:"EnableExpress,omitempty"` + EnableFilteringMessagesBeforePublishing *bool `json:"EnableFilteringMessagesBeforePublishing,omitempty"` + EnablePartitioning *bool `json:"EnablePartitioning,omitempty"` + IsAnonymousAccessible *bool `json:"IsAnonymousAccessible,omitempty"` + MaxSizeInMegabytes *int64 `json:"MaxSizeInMegabytes ,omitempty"` + MessageCountDetails *MessageCountDetails `json:"MessageCountDetails,omitempty"` + Path *string `json:"Path,omitempty"` + RequiresDuplicateDetection *bool `json:"RequiresDuplicateDetection,omitempty"` + SizeInBytes *int64 `json:"SizeInBytes ,omitempty"` + Status EntityStatus `json:"Status,omitempty"` + SubscriptionCount *int32 `json:"SubscriptionCount,omitempty"` + SupportOrdering *bool `json:"SupportOrdering,omitempty"` + UpdatedAt *date.Time `json:"UpdatedAt,omitempty"` + UserMetadata *string `json:"UserMetadata,omitempty"` +} + +// TopicResource is description of topic Resource. +type TopicResource struct { + autorest.Response `json:"-"` + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Type *string `json:"type,omitempty"` + Location *string `json:"location,omitempty"` + Tags *map[string]*string `json:"tags,omitempty"` + Properties *TopicProperties `json:"properties,omitempty"` +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go new file mode 100644 index 000000000..deab1e4ef --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/namespaces.go @@ -0,0 +1,825 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. 
+ +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// NamespacesClient is the azure Service Bus client +type NamespacesClient struct { + ManagementClient +} + +// NewNamespacesClient creates an instance of the NamespacesClient client. +func NewNamespacesClient(subscriptionID string) NamespacesClient { + return NewNamespacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewNamespacesClientWithBaseURI creates an instance of the NamespacesClient +// client. +func NewNamespacesClientWithBaseURI(baseURI string, subscriptionID string) NamespacesClient { + return NamespacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates/Updates a service namespace. Once created, this +// namespace's resource manifest is immutable. This operation is idempotent. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. The channel will be used to cancel polling +// and any outstanding HTTP requests. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. parameters is parameters supplied to create a Namespace +// Resource. +func (client NamespacesClient) CreateOrUpdate(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, parameters, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client NamespacesClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, parameters NamespaceCreateOrUpdateParameters, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client NamespacesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusCreated, http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdateAuthorizationRule creates an authorization rule for a +// namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is namespace Aauthorization Rule +// Name. parameters is the shared access authorization rule. +func (client NamespacesClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. +func (client NamespacesClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. 
+func (client NamespacesClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an existing namespace. This operation also removes all +// associated resources under the namespace. This method may poll for +// completion. Polling can be canceled by passing the cancel channel +// argument. The channel will be used to cancel polling and any outstanding +// HTTP requests. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. +func (client NamespacesClient) Delete(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, cancel) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client NamespacesClient) DeletePreparer(resourceGroupName string, namespaceName string, cancel <-chan struct{}) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{Cancel: cancel}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, + req, + azure.DoPollForAsynchronous(client.PollingDelay)) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a namespace authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is authorization Rule Name. +func (client NamespacesClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. +func (client NamespacesClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns the description for the specified namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. 
+func (client NamespacesClient) Get(resourceGroupName string, namespaceName string) (result NamespaceResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client NamespacesClient) GetPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client NamespacesClient) GetResponder(resp *http.Response) (result NamespaceResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule authorization rule for a namespace by name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name authorizationRuleName is authorization rule name. 
+func (client NamespacesClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. +func (client NamespacesClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. +func (client NamespacesClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRules authorization rules for a namespace. +// +// resourceGroupName is the name of the resource group. 
namespaceName is the +// namespace name +func (client NamespacesClient) ListAuthorizationRules(resourceGroupName string, namespaceName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. +func (client NamespacesClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client NamespacesClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. 
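[Editorial note, not part of the vendored patch.] The generated NamespacesClient above follows the usual AutoRest layout: a top-level method plus Preparer/Sender/Responder helpers. A minimal sketch of driving it from Go follows; the NewNamespacesClient constructor (assumed by analogy with NewQueuesClient/NewSubscriptionsClient later in this patch), the resource-group and namespace names, and the omitted credential/authorizer wiring are all assumptions, not shown in this diff.

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/Azure/azure-sdk-for-go/arm/servicebus"
    )

    func main() {
    	// Assumed constructor; authentication setup on the client is omitted here.
    	nsClient := servicebus.NewNamespacesClient("<subscription-id>")

    	// Fetch a namespace description (example names).
    	ns, err := nsClient.Get("example-rg", "example-namespace")
    	if err != nil {
    		log.Fatalf("getting namespace: %v", err)
    	}
    	fmt.Printf("namespace: %+v\n", ns)

    	// List its shared access authorization rules.
    	rules, err := nsClient.ListAuthorizationRules("example-rg", "example-namespace")
    	if err != nil {
    		log.Fatalf("listing authorization rules: %v", err)
    	}
    	fmt.Printf("rules: %+v\n", rules)
    }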
+func (client NamespacesClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListByResourceGroup lists the available namespaces within a resourceGroup. +// +// resourceGroupName is the name of the resource group. +func (client NamespacesClient) ListByResourceGroup(resourceGroupName string) (result NamespaceListResult, err error) { + req, err := client.ListByResourceGroupPreparer(resourceGroupName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing request") + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure sending request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client NamespacesClient) ListByResourceGroupPreparer(resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) ListByResourceGroupResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroupNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListByResourceGroupNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure sending next results request request") + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListByResourceGroup", resp, "Failure responding to next results request request") + } + + return +} + +// ListBySubscription lists all the available namespaces within the +// subscription irrespective of the resourceGroups. +func (client NamespacesClient) ListBySubscription() (result NamespaceListResult, err error) { + req, err := client.ListBySubscriptionPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", nil, "Failure preparing request") + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure sending request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client NamespacesClient) ListBySubscriptionPreparer() (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.ServiceBus/namespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client NamespacesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. 
+func (client NamespacesClient) ListBySubscriptionResponder(resp *http.Response) (result NamespaceListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionNextResults retrieves the next set of results, if any. +func (client NamespacesClient) ListBySubscriptionNextResults(lastResults NamespaceListResult) (result NamespaceListResult, err error) { + req, err := lastResults.NamespaceListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure sending next results request request") + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListBySubscription", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys primary and Secondary ConnectionStrings to the namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. authorizationRuleName is the authorizationRule name. +func (client NamespacesClient) ListKeys(resourceGroupName string, namespaceName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client NamespacesClient) ListKeysPreparer(resourceGroupName string, namespaceName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/listKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) ListKeysSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListKeysResponder handles the response to the ListKeys request. The method always
+// closes the http.Response Body.
+func (client NamespacesClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// RegenerateKeys regenerates the Primary or Secondary ConnectionStrings to the
+// namespace
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. authorizationRuleName is the authorizationRule name.
+// parameters is parameters supplied to regenerate Auth Rule.
+func (client NamespacesClient) RegenerateKeys(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateKeysParameters) (result ResourceListKeys, err error) {
+	req, err := client.RegenerateKeysPreparer(resourceGroupName, namespaceName, authorizationRuleName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", nil, "Failure preparing request")
+	}
+
+	resp, err := client.RegenerateKeysSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", resp, "Failure sending request")
+	}
+
+	result, err = client.RegenerateKeysResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "RegenerateKeys", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// RegenerateKeysPreparer prepares the RegenerateKeys request.
+func (client NamespacesClient) RegenerateKeysPreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateKeysParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
+		"namespaceName": autorest.Encode("path", namespaceName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/AuthorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// RegenerateKeysSender sends the RegenerateKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always
+// closes the http.Response Body.
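[Editorial note, not part of the vendored patch.] Continuing the illustrative sketch above, this is how the namespace key operations might be called; the authorization rule name is an example value and credential setup is still omitted.

    // Fetch the connection strings for a namespace authorization rule (example rule name).
    keys, err := nsClient.ListKeys("example-rg", "example-namespace", "RootManageSharedAccessKey")
    if err != nil {
    	log.Fatalf("listing namespace keys: %v", err)
    }
    fmt.Printf("keys: %+v\n", keys)

RegenerateKeys follows the same shape but additionally takes a RegenerateKeysParameters value, whose fields are defined elsewhere in the package and are not shown in this excerpt.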
+func (client NamespacesClient) RegenerateKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/queues.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/queues.go new file mode 100644 index 000000000..25912e284 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/queues.go @@ -0,0 +1,746 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// QueuesClient is the azure Service Bus client +type QueuesClient struct { + ManagementClient +} + +// NewQueuesClient creates an instance of the QueuesClient client. +func NewQueuesClient(subscriptionID string) QueuesClient { + return NewQueuesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewQueuesClientWithBaseURI creates an instance of the QueuesClient client. +func NewQueuesClientWithBaseURI(baseURI string, subscriptionID string) QueuesClient { + return QueuesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates/Updates a service Queue. This operation is +// idempotent. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. parameters is parameters +// supplied to create a Queue Resource. +func (client QueuesClient) CreateOrUpdate(resourceGroupName string, namespaceName string, queueName string, parameters QueueCreateOrUpdateParameters) (result QueueResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, queueName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client QueuesClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, queueName string, parameters QueueCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client QueuesClient) CreateOrUpdateResponder(resp *http.Response) (result QueueResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAuthorizationRule creates an authorization rule for a queue +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. authorizationRuleName is +// aauthorization Rule Name. parameters is the shared access authorization +// rule. +func (client QueuesClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, queueName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. 
+func (client QueuesClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. +func (client QueuesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a queue from the specified namespace in resource group. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. +func (client QueuesClient) Delete(resourceGroupName string, namespaceName string, queueName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, queueName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
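[Editorial note, not part of the vendored patch.] A hedged sketch of creating a queue with the generated QueuesClient; NewQueuesClient is defined earlier in this file, but the fields of QueueCreateOrUpdateParameters (location, queue properties, and so on) live elsewhere in the package, so the struct is shown zero-valued and would need to be populated for a real call.

    queuesClient := servicebus.NewQueuesClient("<subscription-id>")

    // Required fields of QueueCreateOrUpdateParameters are defined in the package's model
    // types (not shown in this excerpt) and must be set before a real call.
    params := servicebus.QueueCreateOrUpdateParameters{}
    queue, err := queuesClient.CreateOrUpdate("example-rg", "example-namespace", "example-queue", params)
    if err != nil {
    	log.Fatalf("creating queue: %v", err)
    }
    fmt.Printf("queue: %+v\n", queue)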
+func (client QueuesClient) DeletePreparer(resourceGroupName string, namespaceName string, queueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client QueuesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAuthorizationRule deletes a queue authorization rule +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. authorizationRuleName is +// authorization Rule Name. +func (client QueuesClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, queueName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. 
+func (client QueuesClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client QueuesClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns the description for the specified queue. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. +func (client QueuesClient) Get(resourceGroupName string, namespaceName string, queueName string) (result QueueResource, err error) { + req, err := client.GetPreparer(resourceGroupName, namespaceName, queueName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Get", nil, "Failure preparing request") + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Get", resp, "Failure sending request") + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client QueuesClient) GetPreparer(resourceGroupName string, namespaceName string, queueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) GetSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client QueuesClient) GetResponder(resp *http.Response) (result QueueResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAuthorizationRule queue authorizationRule for a queue by name. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name queueName is the queue name. authorizationRuleName is +// authorization rule name. +func (client QueuesClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, queueName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. 
+func (client QueuesClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. +func (client QueuesClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAll lists the queues within the namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. +func (client QueuesClient) ListAll(resourceGroupName string, namespaceName string) (result QueueListResult, err error) { + req, err := client.ListAllPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client QueuesClient) ListAllPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client QueuesClient) ListAllResponder(resp *http.Response) (result QueueListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. +func (client QueuesClient) ListAllNextResults(lastResults QueueListResult) (result QueueListResult, err error) { + req, err := lastResults.QueueListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", resp, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAll", resp, "Failure responding to next results request request") + } + + return +} + +// ListAuthorizationRules returns all Queue authorizationRules. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name queueName is the queue name. 
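[Editorial note, not part of the vendored patch.] The ListAll/ListAllNextResults pair above implements paging. A sketch of consuming it follows; it assumes QueueListResult exposes a NextLink pointer field, as AutoRest list results typically do, since the model type itself is not part of this excerpt.

    // Page through all queues in the namespace (NextLink field is an assumption).
    page, err := queuesClient.ListAll("example-rg", "example-namespace")
    for err == nil {
    	fmt.Printf("queues page: %+v\n", page)
    	if page.NextLink == nil || *page.NextLink == "" {
    		break
    	}
    	page, err = queuesClient.ListAllNextResults(page)
    }
    if err != nil {
    	log.Fatalf("listing queues: %v", err)
    }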
+func (client QueuesClient) ListAuthorizationRules(resourceGroupName string, namespaceName string, queueName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName, queueName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. +func (client QueuesClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string, queueName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client QueuesClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. 
+func (client QueuesClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", resp, "Failure sending next results request request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListAuthorizationRules", resp, "Failure responding to next results request request") + } + + return +} + +// ListKeys primary and Secondary ConnectionStrings to the queue. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. authorizationRuleName is the +// authorizationRule name. +func (client QueuesClient) ListKeys(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result ResourceListKeys, err error) { + req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, queueName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListKeys", nil, "Failure preparing request") + } + + resp, err := client.ListKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListKeys", resp, "Failure sending request") + } + + result, err = client.ListKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "ListKeys", resp, "Failure responding to request") + } + + return +} + +// ListKeysPreparer prepares the ListKeys request. +func (client QueuesClient) ListKeysPreparer(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/ListKeys", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListKeysSender sends the ListKeys request. The method will close the +// http.Response Body if it receives an error. 
+func (client QueuesClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client QueuesClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKeys regenerates the Primary or Secondary ConnectionStrings to +// the Queue +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. queueName is the queue name. authorizationRuleName is the +// authorizationRule name parameters is parameters supplied to regenerate +// Auth Rule. +func (client QueuesClient) RegenerateKeys(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters RegenerateKeysParameters) (result ResourceListKeys, err error) { + req, err := client.RegenerateKeysPreparer(resourceGroupName, namespaceName, queueName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "RegenerateKeys", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "RegenerateKeys", resp, "Failure sending request") + } + + result, err = client.RegenerateKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.QueuesClient", "RegenerateKeys", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeysPreparer prepares the RegenerateKeys request. +func (client QueuesClient) RegenerateKeysPreparer(resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters RegenerateKeysParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "queueName": autorest.Encode("path", queueName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/queues/{queueName}/authorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeysSender sends the RegenerateKeys request. The method will close the +// http.Response Body if it receives an error. +func (client QueuesClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always +// closes the http.Response Body. 
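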
+func (client QueuesClient) RegenerateKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/subscriptions.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/subscriptions.go new file mode 100644 index 000000000..669694b0a --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/subscriptions.go @@ -0,0 +1,330 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// SubscriptionsClient is the azure Service Bus client +type SubscriptionsClient struct { + ManagementClient +} + +// NewSubscriptionsClient creates an instance of the SubscriptionsClient +// client. +func NewSubscriptionsClient(subscriptionID string) SubscriptionsClient { + return NewSubscriptionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewSubscriptionsClientWithBaseURI creates an instance of the +// SubscriptionsClient client. +func NewSubscriptionsClientWithBaseURI(baseURI string, subscriptionID string) SubscriptionsClient { + return SubscriptionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates a topic subscription +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topicName name. subscriptionName is the +// subscriptionName name. parameters is parameters supplied to create a +// subscription Resource. 
+func (client SubscriptionsClient) CreateOrUpdate(resourceGroupName string, namespaceName string, topicName string, subscriptionName string, parameters SubscriptionCreateOrUpdateParameters) (result SubscriptionResource, err error) { + req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, topicName, subscriptionName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "CreateOrUpdate", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "CreateOrUpdate", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client SubscriptionsClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, topicName string, subscriptionName string, parameters SubscriptionCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "subscriptionName": autorest.Encode("path", subscriptionName), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client SubscriptionsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client SubscriptionsClient) CreateOrUpdateResponder(resp *http.Response) (result SubscriptionResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a subscription from the specified topic. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topic name. subscriptionName is the +// subscription name. 
+func (client SubscriptionsClient) Delete(resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (result autorest.Response, err error) { + req, err := client.DeletePreparer(resourceGroupName, namespaceName, topicName, subscriptionName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Delete", nil, "Failure preparing request") + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Delete", resp, "Failure sending request") + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client SubscriptionsClient) DeletePreparer(resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "subscriptionName": autorest.Encode("path", subscriptionName), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client SubscriptionsClient) DeleteSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client SubscriptionsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get returns a subscription description for the specified topic. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topic name. subscriptionName is the +// subscription name. 
+func (client SubscriptionsClient) Get(resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (result SubscriptionResource, err error) {
+	req, err := client.GetPreparer(resourceGroupName, namespaceName, topicName, subscriptionName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Get", nil, "Failure preparing request")
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Get", resp, "Failure sending request")
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
+func (client SubscriptionsClient) GetPreparer(resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"namespaceName":     autorest.Encode("path", namespaceName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"subscriptionName":  autorest.Encode("path", subscriptionName),
+		"topicName":         autorest.Encode("path", topicName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions/{subscriptionName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client SubscriptionsClient) GetSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client SubscriptionsClient) GetResponder(resp *http.Response) (result SubscriptionResource, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAll lists all the subscriptions under a specified topic.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name.
+func (client SubscriptionsClient) ListAll(resourceGroupName string, namespaceName string, topicName string) (result SubscriptionListResult, err error) { + req, err := client.ListAllPreparer(resourceGroupName, namespaceName, topicName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. +func (client SubscriptionsClient) ListAllPreparer(resourceGroupName string, namespaceName string, topicName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/subscriptions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAllSender sends the ListAll request. The method will close the +// http.Response Body if it receives an error. +func (client SubscriptionsClient) ListAllSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAllResponder handles the response to the ListAll request. The method always +// closes the http.Response Body. +func (client SubscriptionsClient) ListAllResponder(resp *http.Response) (result SubscriptionListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAllNextResults retrieves the next set of results, if any. 
+func (client SubscriptionsClient) ListAllNextResults(lastResults SubscriptionListResult) (result SubscriptionListResult, err error) { + req, err := lastResults.SubscriptionListResultPreparer() + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", nil, "Failure preparing next results request request") + } + if req == nil { + return + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", resp, "Failure sending next results request request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "ListAll", resp, "Failure responding to next results request request") + } + + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/topics.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/topics.go new file mode 100644 index 000000000..1c63c98d9 --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/topics.go @@ -0,0 +1,751 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "net/http" +) + +// TopicsClient is the azure Service Bus client +type TopicsClient struct { + ManagementClient +} + +// NewTopicsClient creates an instance of the TopicsClient client. +func NewTopicsClient(subscriptionID string) TopicsClient { + return NewTopicsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTopicsClientWithBaseURI creates an instance of the TopicsClient client. +func NewTopicsClientWithBaseURI(baseURI string, subscriptionID string) TopicsClient { + return TopicsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates a topic in the specified namespace +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topic name. parameters is parameters +// supplied to create a Topic Resource. 
+func (client TopicsClient) CreateOrUpdate(resourceGroupName string, namespaceName string, topicName string, parameters TopicCreateOrUpdateParameters) (result TopicResource, err error) {
+	req, err := client.CreateOrUpdatePreparer(resourceGroupName, namespaceName, topicName, parameters)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdate", nil, "Failure preparing request")
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdate", resp, "Failure sending request")
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
+func (client TopicsClient) CreateOrUpdatePreparer(resourceGroupName string, namespaceName string, topicName string, parameters TopicCreateOrUpdateParameters) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"namespaceName":     autorest.Encode("path", namespaceName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"topicName":         autorest.Encode("path", topicName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsJSON(),
+		autorest.AsPut(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}", pathParameters),
+		autorest.WithJSON(parameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the
+// http.Response Body if it receives an error.
+func (client TopicsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always
+// closes the http.Response Body.
+func (client TopicsClient) CreateOrUpdateResponder(resp *http.Response) (result TopicResource, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// CreateOrUpdateAuthorizationRule creates an authorizationRule for the
+// specified topic.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name. authorizationRuleName is
+// the authorization rule name. parameters is the shared access authorization
+// rule.
+func (client TopicsClient) CreateOrUpdateAuthorizationRule(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.CreateOrUpdateAuthorizationRulePreparer(resourceGroupName, namespaceName, topicName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdateAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.CreateOrUpdateAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdateAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.CreateOrUpdateAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "CreateOrUpdateAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateAuthorizationRulePreparer prepares the CreateOrUpdateAuthorizationRule request. +func (client TopicsClient) CreateOrUpdateAuthorizationRulePreparer(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters SharedAccessAuthorizationRuleCreateOrUpdateParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// CreateOrUpdateAuthorizationRuleSender sends the CreateOrUpdateAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) CreateOrUpdateAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// CreateOrUpdateAuthorizationRuleResponder handles the response to the CreateOrUpdateAuthorizationRule request. The method always +// closes the http.Response Body. +func (client TopicsClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a topic from the specified namespace and resource group. +// This method may poll for completion. Polling can be canceled by passing +// the cancel channel argument. 
The channel will be used to cancel polling
+// and any outstanding HTTP requests.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name.
+func (client TopicsClient) Delete(resourceGroupName string, namespaceName string, topicName string, cancel <-chan struct{}) (result autorest.Response, err error) {
+	req, err := client.DeletePreparer(resourceGroupName, namespaceName, topicName, cancel)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "Delete", nil, "Failure preparing request")
+	}
+
+	resp, err := client.DeleteSender(req)
+	if err != nil {
+		result.Response = resp
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "Delete", resp, "Failure sending request")
+	}
+
+	result, err = client.DeleteResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "Delete", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// DeletePreparer prepares the Delete request.
+func (client TopicsClient) DeletePreparer(resourceGroupName string, namespaceName string, topicName string, cancel <-chan struct{}) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"namespaceName":     autorest.Encode("path", namespaceName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+		"topicName":         autorest.Encode("path", topicName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{Cancel: cancel})
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client TopicsClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client,
+		req,
+		azure.DoPollForAsynchronous(client.PollingDelay))
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client TopicsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// DeleteAuthorizationRule deletes a topic authorizationRule.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name. authorizationRuleName is
+// the authorizationRule name.
+func (client TopicsClient) DeleteAuthorizationRule(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result autorest.Response, err error) { + req, err := client.DeleteAuthorizationRulePreparer(resourceGroupName, namespaceName, topicName, authorizationRuleName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "DeleteAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.DeleteAuthorizationRuleSender(req) + if err != nil { + result.Response = resp + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "DeleteAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.DeleteAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "DeleteAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// DeleteAuthorizationRulePreparer prepares the DeleteAuthorizationRule request. +func (client TopicsClient) DeleteAuthorizationRulePreparer(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// DeleteAuthorizationRuleSender sends the DeleteAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) DeleteAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// DeleteAuthorizationRuleResponder handles the response to the DeleteAuthorizationRule request. The method always +// closes the http.Response Body. +func (client TopicsClient) DeleteAuthorizationRuleResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// GetAuthorizationRule returns the specified authorizationRule. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name authorizationRuleName is authorization rule name. topicName +// is the topic name. 
+func (client TopicsClient) GetAuthorizationRule(resourceGroupName string, namespaceName string, authorizationRuleName string, topicName string) (result SharedAccessAuthorizationRuleResource, err error) { + req, err := client.GetAuthorizationRulePreparer(resourceGroupName, namespaceName, authorizationRuleName, topicName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetAuthorizationRule", nil, "Failure preparing request") + } + + resp, err := client.GetAuthorizationRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetAuthorizationRule", resp, "Failure sending request") + } + + result, err = client.GetAuthorizationRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetAuthorizationRule", resp, "Failure responding to request") + } + + return +} + +// GetAuthorizationRulePreparer prepares the GetAuthorizationRule request. +func (client TopicsClient) GetAuthorizationRulePreparer(resourceGroupName string, namespaceName string, authorizationRuleName string, topicName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetAuthorizationRuleSender sends the GetAuthorizationRule request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) GetAuthorizationRuleSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetAuthorizationRuleResponder handles the response to the GetAuthorizationRule request. The method always +// closes the http.Response Body. +func (client TopicsClient) GetAuthorizationRuleResponder(resp *http.Response) (result SharedAccessAuthorizationRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetTopic returns the description for the specified topic +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topic name. 
+func (client TopicsClient) GetTopic(resourceGroupName string, namespaceName string, topicName string) (result TopicResource, err error) { + req, err := client.GetTopicPreparer(resourceGroupName, namespaceName, topicName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetTopic", nil, "Failure preparing request") + } + + resp, err := client.GetTopicSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetTopic", resp, "Failure sending request") + } + + result, err = client.GetTopicResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "GetTopic", resp, "Failure responding to request") + } + + return +} + +// GetTopicPreparer prepares the GetTopic request. +func (client TopicsClient) GetTopicPreparer(resourceGroupName string, namespaceName string, topicName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// GetTopicSender sends the GetTopic request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) GetTopicSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// GetTopicResponder handles the response to the GetTopic request. The method always +// closes the http.Response Body. +func (client TopicsClient) GetTopicResponder(resp *http.Response) (result TopicResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAll lists all the topics in a namespace. +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. +func (client TopicsClient) ListAll(resourceGroupName string, namespaceName string) (result TopicListResult, err error) { + req, err := client.ListAllPreparer(resourceGroupName, namespaceName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", nil, "Failure preparing request") + } + + resp, err := client.ListAllSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", resp, "Failure sending request") + } + + result, err = client.ListAllResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", resp, "Failure responding to request") + } + + return +} + +// ListAllPreparer prepares the ListAll request. 
+func (client TopicsClient) ListAllPreparer(resourceGroupName string, namespaceName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"namespaceName":     autorest.Encode("path", namespaceName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListAllSender sends the ListAll request. The method will close the
+// http.Response Body if it receives an error.
+func (client TopicsClient) ListAllSender(req *http.Request) (*http.Response, error) {
+	return autorest.SendWithSender(client, req)
+}
+
+// ListAllResponder handles the response to the ListAll request. The method always
+// closes the http.Response Body.
+func (client TopicsClient) ListAllResponder(resp *http.Response) (result TopicListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAllNextResults retrieves the next set of results, if any.
+func (client TopicsClient) ListAllNextResults(lastResults TopicListResult) (result TopicListResult, err error) {
+	req, err := lastResults.TopicListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", nil, "Failure preparing next results request request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAllSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", resp, "Failure sending next results request request")
+	}
+
+	result, err = client.ListAllResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAll", resp, "Failure responding to next results request request")
+	}
+
+	return
+}
+
+// ListAuthorizationRules lists the authorization rules for a topic.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name.
+func (client TopicsClient) ListAuthorizationRules(resourceGroupName string, namespaceName string, topicName string) (result SharedAccessAuthorizationRuleListResult, err error) { + req, err := client.ListAuthorizationRulesPreparer(resourceGroupName, namespaceName, topicName) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", nil, "Failure preparing request") + } + + resp, err := client.ListAuthorizationRulesSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", resp, "Failure sending request") + } + + result, err = client.ListAuthorizationRulesResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", resp, "Failure responding to request") + } + + return +} + +// ListAuthorizationRulesPreparer prepares the ListAuthorizationRules request. +func (client TopicsClient) ListAuthorizationRulesPreparer(resourceGroupName string, namespaceName string, topicName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// ListAuthorizationRulesSender sends the ListAuthorizationRules request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) ListAuthorizationRulesSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListAuthorizationRulesResponder handles the response to the ListAuthorizationRules request. The method always +// closes the http.Response Body. +func (client TopicsClient) ListAuthorizationRulesResponder(resp *http.Response) (result SharedAccessAuthorizationRuleListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListAuthorizationRulesNextResults retrieves the next set of results, if any. 
+func (client TopicsClient) ListAuthorizationRulesNextResults(lastResults SharedAccessAuthorizationRuleListResult) (result SharedAccessAuthorizationRuleListResult, err error) {
+	req, err := lastResults.SharedAccessAuthorizationRuleListResultPreparer()
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", nil, "Failure preparing next results request request")
+	}
+	if req == nil {
+		return
+	}
+
+	resp, err := client.ListAuthorizationRulesSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", resp, "Failure sending next results request request")
+	}
+
+	result, err = client.ListAuthorizationRulesResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListAuthorizationRules", resp, "Failure responding to next results request request")
+	}
+
+	return
+}
+
+// ListKeys returns the primary and secondary connection strings for the topic.
+//
+// resourceGroupName is the name of the resource group. namespaceName is the
+// namespace name. topicName is the topic name. authorizationRuleName is the
+// authorizationRule name.
+func (client TopicsClient) ListKeys(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result ResourceListKeys, err error) {
+	req, err := client.ListKeysPreparer(resourceGroupName, namespaceName, topicName, authorizationRuleName)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListKeys", nil, "Failure preparing request")
+	}
+
+	resp, err := client.ListKeysSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListKeys", resp, "Failure sending request")
+	}
+
+	result, err = client.ListKeysResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "ListKeys", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListKeysPreparer prepares the ListKeys request.
+func (client TopicsClient) ListKeysPreparer(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"authorizationRuleName": autorest.Encode("path", authorizationRuleName),
+		"namespaceName":         autorest.Encode("path", namespaceName),
+		"resourceGroupName":     autorest.Encode("path", resourceGroupName),
+		"subscriptionId":        autorest.Encode("path", client.SubscriptionID),
+		"topicName":             autorest.Encode("path", topicName),
+	}
+
+	queryParameters := map[string]interface{}{
+		"api-version": client.APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}/ListKeys", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare(&http.Request{})
+}
+
+// ListKeysSender sends the ListKeys request. The method will close the
+// http.Response Body if it receives an error.
+func (client TopicsClient) ListKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// ListKeysResponder handles the response to the ListKeys request. The method always +// closes the http.Response Body. +func (client TopicsClient) ListKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// RegenerateKeys regenerates Primary or Secondary ConnectionStrings to the +// topic +// +// resourceGroupName is the name of the resource group. namespaceName is the +// namespace name. topicName is the topic name. authorizationRuleName is the +// authorizationRule name. parameters is parameters supplied to regenerate +// Auth Rule. +func (client TopicsClient) RegenerateKeys(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters RegenerateKeysParameters) (result ResourceListKeys, err error) { + req, err := client.RegenerateKeysPreparer(resourceGroupName, namespaceName, topicName, authorizationRuleName, parameters) + if err != nil { + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "RegenerateKeys", nil, "Failure preparing request") + } + + resp, err := client.RegenerateKeysSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "RegenerateKeys", resp, "Failure sending request") + } + + result, err = client.RegenerateKeysResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "servicebus.TopicsClient", "RegenerateKeys", resp, "Failure responding to request") + } + + return +} + +// RegenerateKeysPreparer prepares the RegenerateKeys request. +func (client TopicsClient) RegenerateKeysPreparer(resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters RegenerateKeysParameters) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "authorizationRuleName": autorest.Encode("path", authorizationRuleName), + "namespaceName": autorest.Encode("path", namespaceName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "topicName": autorest.Encode("path", topicName), + } + + queryParameters := map[string]interface{}{ + "api-version": client.APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsJSON(), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/topics/{topicName}/authorizationRules/{authorizationRuleName}/regenerateKeys", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare(&http.Request{}) +} + +// RegenerateKeysSender sends the RegenerateKeys request. The method will close the +// http.Response Body if it receives an error. +func (client TopicsClient) RegenerateKeysSender(req *http.Request) (*http.Response, error) { + return autorest.SendWithSender(client, req) +} + +// RegenerateKeysResponder handles the response to the RegenerateKeys request. The method always +// closes the http.Response Body. 
+func (client TopicsClient) RegenerateKeysResponder(resp *http.Response) (result ResourceListKeys, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go new file mode 100644 index 000000000..8d9ed060b --- /dev/null +++ b/vendor/github.com/Azure/azure-sdk-for-go/arm/servicebus/version.go @@ -0,0 +1,43 @@ +package servicebus + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0 +// Changes may cause incorrect behavior and will be lost if the code is +// regenerated. + +import ( + "fmt" +) + +const ( + major = "3" + minor = "2" + patch = "0" + // Always begin a "tag" with a dash (as per http://semver.org) + tag = "-beta" + semVerFormat = "%s.%s.%s%s" + userAgentFormat = "Azure-SDK-for-Go/%s arm-%s/%s" +) + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return fmt.Sprintf(userAgentFormat, Version(), "servicebus", "2015-08-01") +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return fmt.Sprintf(semVerFormat, major, minor, patch, tag) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 0da5a3916..2e3e9d939 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -37,6 +37,12 @@ "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", "revisionTime": "2016-08-11T22:07:13Z" }, + { + "checksumSHA1": "jskb+xe0vrZayq7Iu9yyJXo+Kmc=", + "path": "github.com/Azure/azure-sdk-for-go/arm/servicebus", + "revision": "bfc5b4af08f3d3745d908af36b7ed5b9060f0258", + "revisionTime": "2016-08-11T22:07:13Z" + }, { "checksumSHA1": "jh7wjswBwwVeY/P8wtqtqBR58y4=", "comment": "v2.1.1-beta-8-gca4d906", diff --git a/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown new file mode 100644 index 000000000..c0ee81e2e --- /dev/null +++ b/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown @@ -0,0 +1,55 @@ +--- +layout: "azurerm" +page_title: "Azure Resource Manager: azurerm_servicebus_namespace" +sidebar_current: "docs-azurerm-resource-servicebus-namespace" +description: |- + Create a ServiceBus Namespace. +--- + +# azurerm\_servicebus\_namespace + +Create a ServiceBus Namespace. 
+
+## Example Usage
+
+```
+resource "azurerm_resource_group" "test" {
+    name     = "resourceGroup1"
+    location = "West US"
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+    name                = "acceptanceTestServiceBusNamespace"
+    location            = "West US"
+    resource_group_name = "${azurerm_resource_group.test.name}"
+    sku                 = "basic"
+
+    tags {
+        environment = "Production"
+    }
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `name` - (Required) Specifies the name of the ServiceBus Namespace resource. Changing this forces a
+    new resource to be created.
+
+* `resource_group_name` - (Required) The name of the resource group in which to
+    create the namespace.
+
+* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
+
+* `sku` - (Required) Defines which tier to use. Options are basic, standard or premium.
+
+* `capacity` - (Optional) Specifies the capacity of a premium namespace. Can be 1, 2 or 4.
+
+* `tags` - (Optional) A mapping of tags to assign to the resource.
+
+## Attributes Reference
+
+The following attributes are exported:
+
+* `id` - The ServiceBus Namespace ID.

From ecef34841b8f732fd591c3298b614759320b2c27 Mon Sep 17 00:00:00 2001
From: Paul Stack 
Date: Mon, 15 Aug 2016 18:01:06 +0100
Subject: [PATCH 0731/1238] Update CHANGELOG.md

---
 CHANGELOG.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index fc91ed022..8504636a2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ FEATURES:
  * **New Resource:** `aws_load_balancer_listener_policy` [GH-7458]
  * **New Resource:** `aws_lb_ssl_negotiation_policy` [GH-8084]
  * **New Resource:** `azurerm_virtual_network_peering` [GH-8168]
+ * **New Resource:** `azurerm_servicebus_namespace` [GH-8195]
  * **New Resource:** `google_compute_image` [GH-7960]
  * **New Resource:** `packet_volume` [GH-8142]
  * **New Data Source:** `aws_ip_ranges` [GH-7984]

From c9dd75923fe5e7dc808716a83d537b2cd35872cc Mon Sep 17 00:00:00 2001
From: Paul Stack 
Date: Mon, 15 Aug 2016 18:14:45 +0100
Subject: [PATCH 0732/1238] docs/azurerm: Adding a layout section for ServiceBus (#8197)

---
 website/source/layouts/azurerm.erb | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
index b617310bb..1f6840aeb 100644
--- a/website/source/layouts/azurerm.erb
+++ b/website/source/layouts/azurerm.erb
@@ -138,6 +138,15 @@

+                    >
+                        ServiceBus
+
+
+

                    >
                        SQL Resources